Dataset columns:
  path           string (length 14–112)
  content        string (length 0–6.32M)
  size           int64 (0–6.32M)
  max_lines      int64 (1–100k)
  repo_name      string (2 distinct values)
  autogenerated  bool (1 distinct value)
path: cosmopolitan/third_party/intel/rtmintrin.internal.h
content:
#ifndef _IMMINTRIN_H_INCLUDED
#error "Never use <rtmintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef _RTMINTRIN_H_INCLUDED
#define _RTMINTRIN_H_INCLUDED

#ifndef __RTM__
#pragma GCC push_options
#pragma GCC target("rtm")
#define __DISABLE_RTM__
#endif /* __RTM__ */

#define _XBEGIN_STARTED (~0u)
#define _XABORT_EXPLICIT (1 << 0)
#define _XABORT_RETRY (1 << 1)
#define _XABORT_CONFLICT (1 << 2)
#define _XABORT_CAPACITY (1 << 3)
#define _XABORT_DEBUG (1 << 4)
#define _XABORT_NESTED (1 << 5)
#define _XABORT_CODE(x) (((x) >> 24) & 0xFF)

__funline unsigned int _xbegin(void) {
  return __builtin_ia32_xbegin();
}

__funline void _xend(void) {
  __builtin_ia32_xend();
}

#ifdef __OPTIMIZE__
__funline void _xabort(const unsigned int __imm) {
  __builtin_ia32_xabort(__imm);
}
#else
#define _xabort(N) __builtin_ia32_xabort(N)
#endif /* __OPTIMIZE__ */

#ifdef __DISABLE_RTM__
#undef __DISABLE_RTM__
#pragma GCC pop_options
#endif /* __DISABLE_RTM__ */

#endif /* _RTMINTRIN_H_INCLUDED */
size: 1,018
max_lines: 45
repo_name: jart/cosmopolitan
autogenerated: false
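Usage note (not part of the dataset record): a minimal sketch of how the RTM intrinsics above are typically combined, with a bounded retry loop and a non-transactional fallback. The function, retry budget, and atomic fallback are illustrative assumptions; build with -mrtm and run on RTM-capable hardware.

#include <immintrin.h>

static long counter;

void increment(void) {
  /* Try the transaction a few times; the retry budget is arbitrary. */
  for (int tries = 0; tries < 3; ++tries) {
    unsigned int status = _xbegin();
    if (status == _XBEGIN_STARTED) {
      ++counter;   /* executes transactionally */
      _xend();     /* commit */
      return;
    }
    /* The abort status says whether retrying might succeed. */
    if (!(status & _XABORT_RETRY)) break;
  }
  /* Fallback path: a conflicting atomic write also aborts any in-flight
     transaction touching the same cache line, so no update is lost. */
  __atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST);
}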
path: cosmopolitan/third_party/intel/avx512dqintrin.internal.h
content:
#ifndef _IMMINTRIN_H_INCLUDED #error "Never use <avx512dqintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _AVX512DQINTRIN_H_INCLUDED #define _AVX512DQINTRIN_H_INCLUDED #ifndef __AVX512DQ__ #pragma GCC push_options #pragma GCC target("avx512dq") #define __DISABLE_AVX512DQ__ #endif /* __AVX512DQ__ */ __funline unsigned char _ktest_mask8_u8(__mmask8 __A, __mmask8 __B, unsigned char *__CF) { *__CF = (unsigned char)__builtin_ia32_ktestcqi(__A, __B); return (unsigned char)__builtin_ia32_ktestzqi(__A, __B); } __funline unsigned char _ktestz_mask8_u8(__mmask8 __A, __mmask8 __B) { return (unsigned char)__builtin_ia32_ktestzqi(__A, __B); } __funline unsigned char _ktestc_mask8_u8(__mmask8 __A, __mmask8 __B) { return (unsigned char)__builtin_ia32_ktestcqi(__A, __B); } __funline unsigned char _ktest_mask16_u8(__mmask16 __A, __mmask16 __B, unsigned char *__CF) { *__CF = (unsigned char)__builtin_ia32_ktestchi(__A, __B); return (unsigned char)__builtin_ia32_ktestzhi(__A, __B); } __funline unsigned char _ktestz_mask16_u8(__mmask16 __A, __mmask16 __B) { return (unsigned char)__builtin_ia32_ktestzhi(__A, __B); } __funline unsigned char _ktestc_mask16_u8(__mmask16 __A, __mmask16 __B) { return (unsigned char)__builtin_ia32_ktestchi(__A, __B); } __funline unsigned char _kortest_mask8_u8(__mmask8 __A, __mmask8 __B, unsigned char *__CF) { *__CF = (unsigned char)__builtin_ia32_kortestcqi(__A, __B); return (unsigned char)__builtin_ia32_kortestzqi(__A, __B); } __funline unsigned char _kortestz_mask8_u8(__mmask8 __A, __mmask8 __B) { return (unsigned char)__builtin_ia32_kortestzqi(__A, __B); } __funline unsigned char _kortestc_mask8_u8(__mmask8 __A, __mmask8 __B) { return (unsigned char)__builtin_ia32_kortestcqi(__A, __B); } __funline __mmask8 _kadd_mask8(__mmask8 __A, __mmask8 __B) { return (__mmask8)__builtin_ia32_kaddqi((__mmask8)__A, (__mmask8)__B); } __funline __mmask16 _kadd_mask16(__mmask16 __A, __mmask16 __B) { return (__mmask16)__builtin_ia32_kaddhi((__mmask16)__A, (__mmask16)__B); } __funline unsigned int _cvtmask8_u32(__mmask8 __A) { return (unsigned int)__builtin_ia32_kmovb((__mmask8)__A); } __funline __mmask8 _cvtu32_mask8(unsigned int __A) { return (__mmask8)__builtin_ia32_kmovb((__mmask8)__A); } __funline __mmask8 _load_mask8(__mmask8 *__A) { return (__mmask8)__builtin_ia32_kmovb(*(__mmask8 *)__A); } __funline void _store_mask8(__mmask8 *__A, __mmask8 __B) { *(__mmask8 *)__A = __builtin_ia32_kmovb(__B); } __funline __mmask8 _knot_mask8(__mmask8 __A) { return (__mmask8)__builtin_ia32_knotqi((__mmask8)__A); } __funline __mmask8 _kor_mask8(__mmask8 __A, __mmask8 __B) { return (__mmask8)__builtin_ia32_korqi((__mmask8)__A, (__mmask8)__B); } __funline __mmask8 _kxnor_mask8(__mmask8 __A, __mmask8 __B) { return (__mmask8)__builtin_ia32_kxnorqi((__mmask8)__A, (__mmask8)__B); } __funline __mmask8 _kxor_mask8(__mmask8 __A, __mmask8 __B) { return (__mmask8)__builtin_ia32_kxorqi((__mmask8)__A, (__mmask8)__B); } __funline __mmask8 _kand_mask8(__mmask8 __A, __mmask8 __B) { return (__mmask8)__builtin_ia32_kandqi((__mmask8)__A, (__mmask8)__B); } __funline __mmask8 _kandn_mask8(__mmask8 __A, __mmask8 __B) { return (__mmask8)__builtin_ia32_kandnqi((__mmask8)__A, (__mmask8)__B); } __funline __m512d _mm512_broadcast_f64x2(__m128d __A) { return (__m512d)__builtin_ia32_broadcastf64x2_512_mask( (__v2df)__A, _mm512_undefined_pd(), (__mmask8)-1); } __funline __m512d _mm512_mask_broadcast_f64x2(__m512d __O, __mmask8 __M, __m128d __A) { return (__m512d)__builtin_ia32_broadcastf64x2_512_mask((__v2df)__A, 
(__v8df)__O, __M); } __funline __m512d _mm512_maskz_broadcast_f64x2(__mmask8 __M, __m128d __A) { return (__m512d)__builtin_ia32_broadcastf64x2_512_mask( (__v2df)__A, (__v8df)_mm512_setzero_ps(), __M); } __funline __m512i _mm512_broadcast_i64x2(__m128i __A) { return (__m512i)__builtin_ia32_broadcasti64x2_512_mask( (__v2di)__A, _mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_broadcast_i64x2(__m512i __O, __mmask8 __M, __m128i __A) { return (__m512i)__builtin_ia32_broadcasti64x2_512_mask((__v2di)__A, (__v8di)__O, __M); } __funline __m512i _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) { return (__m512i)__builtin_ia32_broadcasti64x2_512_mask( (__v2di)__A, (__v8di)_mm512_setzero_si512(), __M); } __funline __m512 _mm512_broadcast_f32x2(__m128 __A) { return (__m512)__builtin_ia32_broadcastf32x2_512_mask( (__v4sf)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1); } __funline __m512 _mm512_mask_broadcast_f32x2(__m512 __O, __mmask16 __M, __m128 __A) { return (__m512)__builtin_ia32_broadcastf32x2_512_mask((__v4sf)__A, (__v16sf)__O, __M); } __funline __m512 _mm512_maskz_broadcast_f32x2(__mmask16 __M, __m128 __A) { return (__m512)__builtin_ia32_broadcastf32x2_512_mask( (__v4sf)__A, (__v16sf)_mm512_setzero_ps(), __M); } __funline __m512i _mm512_broadcast_i32x2(__m128i __A) { return (__m512i)__builtin_ia32_broadcasti32x2_512_mask( (__v4si)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_broadcast_i32x2(__m512i __O, __mmask16 __M, __m128i __A) { return (__m512i)__builtin_ia32_broadcasti32x2_512_mask((__v4si)__A, (__v16si)__O, __M); } __funline __m512i _mm512_maskz_broadcast_i32x2(__mmask16 __M, __m128i __A) { return (__m512i)__builtin_ia32_broadcasti32x2_512_mask( (__v4si)__A, (__v16si)_mm512_setzero_si512(), __M); } __funline __m512 _mm512_broadcast_f32x8(__m256 __A) { return (__m512)__builtin_ia32_broadcastf32x8_512_mask( (__v8sf)__A, _mm512_undefined_ps(), (__mmask16)-1); } __funline __m512 _mm512_mask_broadcast_f32x8(__m512 __O, __mmask16 __M, __m256 __A) { return (__m512)__builtin_ia32_broadcastf32x8_512_mask((__v8sf)__A, (__v16sf)__O, __M); } __funline __m512 _mm512_maskz_broadcast_f32x8(__mmask16 __M, __m256 __A) { return (__m512)__builtin_ia32_broadcastf32x8_512_mask( (__v8sf)__A, (__v16sf)_mm512_setzero_ps(), __M); } __funline __m512i _mm512_broadcast_i32x8(__m256i __A) { return (__m512i)__builtin_ia32_broadcasti32x8_512_mask( (__v8si)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_broadcast_i32x8(__m512i __O, __mmask16 __M, __m256i __A) { return (__m512i)__builtin_ia32_broadcasti32x8_512_mask((__v8si)__A, (__v16si)__O, __M); } __funline __m512i _mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i __A) { return (__m512i)__builtin_ia32_broadcasti32x8_512_mask( (__v8si)__A, (__v16si)_mm512_setzero_si512(), __M); } __funline __m512i _mm512_mullo_epi64(__m512i __A, __m512i __B) { return (__m512i)((__v8du)__A * (__v8du)__B); } __funline __m512i _mm512_mask_mullo_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmullq512_mask((__v8di)__A, (__v8di)__B, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_mullo_epi64(__mmask8 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmullq512_mask( (__v8di)__A, (__v8di)__B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } __funline __m512d _mm512_xor_pd(__m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_xorpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), 
(__mmask8)-1); } __funline __m512d _mm512_mask_xor_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_xorpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U); } __funline __m512d _mm512_maskz_xor_pd(__mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_xorpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U); } __funline __m512 _mm512_xor_ps(__m512 __A, __m512 __B) { return (__m512)__builtin_ia32_xorps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)-1); } __funline __m512 _mm512_mask_xor_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_xorps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U); } __funline __m512 _mm512_maskz_xor_ps(__mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_xorps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U); } __funline __m512d _mm512_or_pd(__m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_orpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)-1); } __funline __m512d _mm512_mask_or_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_orpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U); } __funline __m512d _mm512_maskz_or_pd(__mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_orpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U); } __funline __m512 _mm512_or_ps(__m512 __A, __m512 __B) { return (__m512)__builtin_ia32_orps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)-1); } __funline __m512 _mm512_mask_or_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_orps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U); } __funline __m512 _mm512_maskz_or_ps(__mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_orps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U); } __funline __m512d _mm512_and_pd(__m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_andpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)-1); } __funline __m512d _mm512_mask_and_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_andpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U); } __funline __m512d _mm512_maskz_and_pd(__mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_andpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U); } __funline __m512 _mm512_and_ps(__m512 __A, __m512 __B) { return (__m512)__builtin_ia32_andps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)-1); } __funline __m512 _mm512_mask_and_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_andps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U); } __funline __m512 _mm512_maskz_and_ps(__mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_andps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U); } __funline __m512d _mm512_andnot_pd(__m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_andnpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)-1); } __funline __m512d _mm512_mask_andnot_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { return 
(__m512d)__builtin_ia32_andnpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U); } __funline __m512d _mm512_maskz_andnot_pd(__mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_andnpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U); } __funline __m512 _mm512_andnot_ps(__m512 __A, __m512 __B) { return (__m512)__builtin_ia32_andnps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)-1); } __funline __m512 _mm512_mask_andnot_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_andnps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U); } __funline __m512 _mm512_maskz_andnot_ps(__mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_andnps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U); } __funline __mmask16 _mm512_movepi32_mask(__m512i __A) { return (__mmask16)__builtin_ia32_cvtd2mask512((__v16si)__A); } __funline __mmask8 _mm512_movepi64_mask(__m512i __A) { return (__mmask8)__builtin_ia32_cvtq2mask512((__v8di)__A); } __funline __m512i _mm512_movm_epi32(__mmask16 __A) { return (__m512i)__builtin_ia32_cvtmask2d512(__A); } __funline __m512i _mm512_movm_epi64(__mmask8 __A) { return (__m512i)__builtin_ia32_cvtmask2q512(__A); } __funline __m512i _mm512_cvttpd_epi64(__m512d __A) { return (__m512i)__builtin_ia32_cvttpd2qq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_mask_cvttpd_epi64(__m512i __W, __mmask8 __U, __m512d __A) { return (__m512i)__builtin_ia32_cvttpd2qq512_mask( (__v8df)__A, (__v8di)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_maskz_cvttpd_epi64(__mmask8 __U, __m512d __A) { return (__m512i)__builtin_ia32_cvttpd2qq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_cvttpd_epu64(__m512d __A) { return (__m512i)__builtin_ia32_cvttpd2uqq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_mask_cvttpd_epu64(__m512i __W, __mmask8 __U, __m512d __A) { return (__m512i)__builtin_ia32_cvttpd2uqq512_mask( (__v8df)__A, (__v8di)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_maskz_cvttpd_epu64(__mmask8 __U, __m512d __A) { return (__m512i)__builtin_ia32_cvttpd2uqq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_cvttps_epi64(__m256 __A) { return (__m512i)__builtin_ia32_cvttps2qq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_mask_cvttps_epi64(__m512i __W, __mmask8 __U, __m256 __A) { return (__m512i)__builtin_ia32_cvttps2qq512_mask( (__v8sf)__A, (__v8di)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_maskz_cvttps_epi64(__mmask8 __U, __m256 __A) { return (__m512i)__builtin_ia32_cvttps2qq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_cvttps_epu64(__m256 __A) { return (__m512i)__builtin_ia32_cvttps2uqq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_mask_cvttps_epu64(__m512i __W, __mmask8 __U, __m256 __A) { return (__m512i)__builtin_ia32_cvttps2uqq512_mask( (__v8sf)__A, (__v8di)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } 
__funline __m512i _mm512_maskz_cvttps_epu64(__mmask8 __U, __m256 __A) { return (__m512i)__builtin_ia32_cvttps2uqq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_cvtpd_epi64(__m512d __A) { return (__m512i)__builtin_ia32_cvtpd2qq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_mask_cvtpd_epi64(__m512i __W, __mmask8 __U, __m512d __A) { return (__m512i)__builtin_ia32_cvtpd2qq512_mask( (__v8df)__A, (__v8di)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_maskz_cvtpd_epi64(__mmask8 __U, __m512d __A) { return (__m512i)__builtin_ia32_cvtpd2qq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_cvtpd_epu64(__m512d __A) { return (__m512i)__builtin_ia32_cvtpd2uqq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_mask_cvtpd_epu64(__m512i __W, __mmask8 __U, __m512d __A) { return (__m512i)__builtin_ia32_cvtpd2uqq512_mask( (__v8df)__A, (__v8di)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_maskz_cvtpd_epu64(__mmask8 __U, __m512d __A) { return (__m512i)__builtin_ia32_cvtpd2uqq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_cvtps_epi64(__m256 __A) { return (__m512i)__builtin_ia32_cvtps2qq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_mask_cvtps_epi64(__m512i __W, __mmask8 __U, __m256 __A) { return (__m512i)__builtin_ia32_cvtps2qq512_mask( (__v8sf)__A, (__v8di)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_maskz_cvtps_epi64(__mmask8 __U, __m256 __A) { return (__m512i)__builtin_ia32_cvtps2qq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_cvtps_epu64(__m256 __A) { return (__m512i)__builtin_ia32_cvtps2uqq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_mask_cvtps_epu64(__m512i __W, __mmask8 __U, __m256 __A) { return (__m512i)__builtin_ia32_cvtps2uqq512_mask( (__v8sf)__A, (__v8di)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_maskz_cvtps_epu64(__mmask8 __U, __m256 __A) { return (__m512i)__builtin_ia32_cvtps2uqq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m256 _mm512_cvtepi64_ps(__m512i __A) { return (__m256)__builtin_ia32_cvtqq2ps512_mask( (__v8di)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m256 _mm512_mask_cvtepi64_ps(__m256 __W, __mmask8 __U, __m512i __A) { return (__m256)__builtin_ia32_cvtqq2ps512_mask( (__v8di)__A, (__v8sf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m256 _mm512_maskz_cvtepi64_ps(__mmask8 __U, __m512i __A) { return (__m256)__builtin_ia32_cvtqq2ps512_mask( (__v8di)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m256 _mm512_cvtepu64_ps(__m512i __A) { return (__m256)__builtin_ia32_cvtuqq2ps512_mask( (__v8di)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m256 _mm512_mask_cvtepu64_ps(__m256 __W, __mmask8 __U, __m512i __A) { return (__m256)__builtin_ia32_cvtuqq2ps512_mask( (__v8di)__A, (__v8sf)__W, 
(__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m256 _mm512_maskz_cvtepu64_ps(__mmask8 __U, __m512i __A) { return (__m256)__builtin_ia32_cvtuqq2ps512_mask( (__v8di)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_cvtepi64_pd(__m512i __A) { return (__m512d)__builtin_ia32_cvtqq2pd512_mask( (__v8di)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_cvtepi64_pd(__m512d __W, __mmask8 __U, __m512i __A) { return (__m512d)__builtin_ia32_cvtqq2pd512_mask( (__v8di)__A, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_cvtepi64_pd(__mmask8 __U, __m512i __A) { return (__m512d)__builtin_ia32_cvtqq2pd512_mask( (__v8di)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_cvtepu64_pd(__m512i __A) { return (__m512d)__builtin_ia32_cvtuqq2pd512_mask( (__v8di)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_cvtepu64_pd(__m512d __W, __mmask8 __U, __m512i __A) { return (__m512d)__builtin_ia32_cvtuqq2pd512_mask( (__v8di)__A, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_cvtepu64_pd(__mmask8 __U, __m512i __A) { return (__m512d)__builtin_ia32_cvtuqq2pd512_mask( (__v8di)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } #ifdef __OPTIMIZE__ __funline __mmask8 _kshiftli_mask8(__mmask8 __A, unsigned int __B) { return (__mmask8)__builtin_ia32_kshiftliqi((__mmask8)__A, (__mmask8)__B); } __funline __mmask8 _kshiftri_mask8(__mmask8 __A, unsigned int __B) { return (__mmask8)__builtin_ia32_kshiftriqi((__mmask8)__A, (__mmask8)__B); } __funline __m512d _mm512_range_pd(__m512d __A, __m512d __B, int __C) { return (__m512d)__builtin_ia32_rangepd512_mask( (__v8df)__A, (__v8df)__B, __C, (__v8df)_mm512_setzero_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_range_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B, int __C) { return (__m512d)__builtin_ia32_rangepd512_mask((__v8df)__A, (__v8df)__B, __C, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_range_pd(__mmask8 __U, __m512d __A, __m512d __B, int __C) { return (__m512d)__builtin_ia32_rangepd512_mask( (__v8df)__A, (__v8df)__B, __C, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_range_ps(__m512 __A, __m512 __B, int __C) { return (__m512)__builtin_ia32_rangeps512_mask( (__v16sf)__A, (__v16sf)__B, __C, (__v16sf)_mm512_setzero_ps(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_range_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B, int __C) { return (__m512)__builtin_ia32_rangeps512_mask((__v16sf)__A, (__v16sf)__B, __C, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_range_ps(__mmask16 __U, __m512 __A, __m512 __B, int __C) { return (__m512)__builtin_ia32_rangeps512_mask( (__v16sf)__A, (__v16sf)__B, __C, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_reduce_sd(__m128d __A, __m128d __B, int __C) { return (__m128d)__builtin_ia32_reducesd_mask( (__v2df)__A, (__v2df)__B, __C, (__v2df)_mm_setzero_pd(), (__mmask8)-1); } __funline __m128d _mm_mask_reduce_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, int __C) { return (__m128d)__builtin_ia32_reducesd_mask((__v2df)__A, (__v2df)__B, __C, (__v2df)__W, 
(__mmask8)__U); } __funline __m128d _mm_maskz_reduce_sd(__mmask8 __U, __m128d __A, __m128d __B, int __C) { return (__m128d)__builtin_ia32_reducesd_mask( (__v2df)__A, (__v2df)__B, __C, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m128 _mm_reduce_ss(__m128 __A, __m128 __B, int __C) { return (__m128)__builtin_ia32_reducess_mask( (__v4sf)__A, (__v4sf)__B, __C, (__v4sf)_mm_setzero_ps(), (__mmask8)-1); } __funline __m128 _mm_mask_reduce_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, int __C) { return (__m128)__builtin_ia32_reducess_mask((__v4sf)__A, (__v4sf)__B, __C, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_reduce_ss(__mmask8 __U, __m128 __A, __m128 __B, int __C) { return (__m128)__builtin_ia32_reducess_mask( (__v4sf)__A, (__v4sf)__B, __C, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m128d _mm_range_sd(__m128d __A, __m128d __B, int __C) { return (__m128d)__builtin_ia32_rangesd128_mask_round( (__v2df)__A, (__v2df)__B, __C, (__v2df)_mm_setzero_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask_range_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, int __C) { return (__m128d)__builtin_ia32_rangesd128_mask_round( (__v2df)__A, (__v2df)__B, __C, (__v2df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_maskz_range_sd(__mmask8 __U, __m128d __A, __m128d __B, int __C) { return (__m128d)__builtin_ia32_rangesd128_mask_round( (__v2df)__A, (__v2df)__B, __C, (__v2df)_mm_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_range_ss(__m128 __A, __m128 __B, int __C) { return (__m128)__builtin_ia32_rangess128_mask_round( (__v4sf)__A, (__v4sf)__B, __C, (__v4sf)_mm_setzero_ps(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask_range_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, int __C) { return (__m128)__builtin_ia32_rangess128_mask_round( (__v4sf)__A, (__v4sf)__B, __C, (__v4sf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_maskz_range_ss(__mmask8 __U, __m128 __A, __m128 __B, int __C) { return (__m128)__builtin_ia32_rangess128_mask_round( (__v4sf)__A, (__v4sf)__B, __C, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_range_round_sd(__m128d __A, __m128d __B, int __C, const int __R) { return (__m128d)__builtin_ia32_rangesd128_mask_round( (__v2df)__A, (__v2df)__B, __C, (__v2df)_mm_setzero_pd(), (__mmask8)-1, __R); } __funline __m128d _mm_mask_range_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, int __C, const int __R) { return (__m128d)__builtin_ia32_rangesd128_mask_round( (__v2df)__A, (__v2df)__B, __C, (__v2df)__W, (__mmask8)__U, __R); } __funline __m128d _mm_maskz_range_round_sd(__mmask8 __U, __m128d __A, __m128d __B, int __C, const int __R) { return (__m128d)__builtin_ia32_rangesd128_mask_round( (__v2df)__A, (__v2df)__B, __C, (__v2df)_mm_setzero_pd(), (__mmask8)__U, __R); } __funline __m128 _mm_range_round_ss(__m128 __A, __m128 __B, int __C, const int __R) { return (__m128)__builtin_ia32_rangess128_mask_round( (__v4sf)__A, (__v4sf)__B, __C, (__v4sf)_mm_setzero_ps(), (__mmask8)-1, __R); } __funline __m128 _mm_mask_range_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, int __C, const int __R) { return (__m128)__builtin_ia32_rangess128_mask_round( (__v4sf)__A, (__v4sf)__B, __C, (__v4sf)__W, (__mmask8)__U, __R); } __funline __m128 _mm_maskz_range_round_ss(__mmask8 __U, __m128 __A, __m128 __B, int __C, const int __R) { return (__m128)__builtin_ia32_rangess128_mask_round( 
(__v4sf)__A, (__v4sf)__B, __C, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, __R); } __funline __mmask8 _mm_fpclass_ss_mask(__m128 __A, const int __imm) { return (__mmask8)__builtin_ia32_fpclassss((__v4sf)__A, __imm); } __funline __mmask8 _mm_fpclass_sd_mask(__m128d __A, const int __imm) { return (__mmask8)__builtin_ia32_fpclasssd((__v2df)__A, __imm); } __funline __m512i _mm512_cvtt_roundpd_epi64(__m512d __A, const int __R) { return (__m512i)__builtin_ia32_cvttpd2qq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1, __R); } __funline __m512i _mm512_mask_cvtt_roundpd_epi64(__m512i __W, __mmask8 __U, __m512d __A, const int __R) { return (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)__A, (__v8di)__W, (__mmask8)__U, __R); } __funline __m512i _mm512_maskz_cvtt_roundpd_epi64(__mmask8 __U, __m512d __A, const int __R) { return (__m512i)__builtin_ia32_cvttpd2qq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, __R); } __funline __m512i _mm512_cvtt_roundpd_epu64(__m512d __A, const int __R) { return (__m512i)__builtin_ia32_cvttpd2uqq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1, __R); } __funline __m512i _mm512_mask_cvtt_roundpd_epu64(__m512i __W, __mmask8 __U, __m512d __A, const int __R) { return (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)__A, (__v8di)__W, (__mmask8)__U, __R); } __funline __m512i _mm512_maskz_cvtt_roundpd_epu64(__mmask8 __U, __m512d __A, const int __R) { return (__m512i)__builtin_ia32_cvttpd2uqq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, __R); } __funline __m512i _mm512_cvtt_roundps_epi64(__m256 __A, const int __R) { return (__m512i)__builtin_ia32_cvttps2qq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1, __R); } __funline __m512i _mm512_mask_cvtt_roundps_epi64(__m512i __W, __mmask8 __U, __m256 __A, const int __R) { return (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)__A, (__v8di)__W, (__mmask8)__U, __R); } __funline __m512i _mm512_maskz_cvtt_roundps_epi64(__mmask8 __U, __m256 __A, const int __R) { return (__m512i)__builtin_ia32_cvttps2qq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, __R); } __funline __m512i _mm512_cvtt_roundps_epu64(__m256 __A, const int __R) { return (__m512i)__builtin_ia32_cvttps2uqq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1, __R); } __funline __m512i _mm512_mask_cvtt_roundps_epu64(__m512i __W, __mmask8 __U, __m256 __A, const int __R) { return (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)__A, (__v8di)__W, (__mmask8)__U, __R); } __funline __m512i _mm512_maskz_cvtt_roundps_epu64(__mmask8 __U, __m256 __A, const int __R) { return (__m512i)__builtin_ia32_cvttps2uqq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, __R); } __funline __m512i _mm512_cvt_roundpd_epi64(__m512d __A, const int __R) { return (__m512i)__builtin_ia32_cvtpd2qq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1, __R); } __funline __m512i _mm512_mask_cvt_roundpd_epi64(__m512i __W, __mmask8 __U, __m512d __A, const int __R) { return (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)__A, (__v8di)__W, (__mmask8)__U, __R); } __funline __m512i _mm512_maskz_cvt_roundpd_epi64(__mmask8 __U, __m512d __A, const int __R) { return (__m512i)__builtin_ia32_cvtpd2qq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, __R); } __funline __m512i _mm512_cvt_roundpd_epu64(__m512d __A, const int __R) { return (__m512i)__builtin_ia32_cvtpd2uqq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), 
(__mmask8)-1, __R); } __funline __m512i _mm512_mask_cvt_roundpd_epu64(__m512i __W, __mmask8 __U, __m512d __A, const int __R) { return (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)__A, (__v8di)__W, (__mmask8)__U, __R); } __funline __m512i _mm512_maskz_cvt_roundpd_epu64(__mmask8 __U, __m512d __A, const int __R) { return (__m512i)__builtin_ia32_cvtpd2uqq512_mask( (__v8df)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, __R); } __funline __m512i _mm512_cvt_roundps_epi64(__m256 __A, const int __R) { return (__m512i)__builtin_ia32_cvtps2qq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1, __R); } __funline __m512i _mm512_mask_cvt_roundps_epi64(__m512i __W, __mmask8 __U, __m256 __A, const int __R) { return (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)__A, (__v8di)__W, (__mmask8)__U, __R); } __funline __m512i _mm512_maskz_cvt_roundps_epi64(__mmask8 __U, __m256 __A, const int __R) { return (__m512i)__builtin_ia32_cvtps2qq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, __R); } __funline __m512i _mm512_cvt_roundps_epu64(__m256 __A, const int __R) { return (__m512i)__builtin_ia32_cvtps2uqq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1, __R); } __funline __m512i _mm512_mask_cvt_roundps_epu64(__m512i __W, __mmask8 __U, __m256 __A, const int __R) { return (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)__A, (__v8di)__W, (__mmask8)__U, __R); } __funline __m512i _mm512_maskz_cvt_roundps_epu64(__mmask8 __U, __m256 __A, const int __R) { return (__m512i)__builtin_ia32_cvtps2uqq512_mask( (__v8sf)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U, __R); } __funline __m256 _mm512_cvt_roundepi64_ps(__m512i __A, const int __R) { return (__m256)__builtin_ia32_cvtqq2ps512_mask( (__v8di)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)-1, __R); } __funline __m256 _mm512_mask_cvt_roundepi64_ps(__m256 __W, __mmask8 __U, __m512i __A, const int __R) { return (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)__A, (__v8sf)__W, (__mmask8)__U, __R); } __funline __m256 _mm512_maskz_cvt_roundepi64_ps(__mmask8 __U, __m512i __A, const int __R) { return (__m256)__builtin_ia32_cvtqq2ps512_mask( (__v8di)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U, __R); } __funline __m256 _mm512_cvt_roundepu64_ps(__m512i __A, const int __R) { return (__m256)__builtin_ia32_cvtuqq2ps512_mask( (__v8di)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)-1, __R); } __funline __m256 _mm512_mask_cvt_roundepu64_ps(__m256 __W, __mmask8 __U, __m512i __A, const int __R) { return (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)__A, (__v8sf)__W, (__mmask8)__U, __R); } __funline __m256 _mm512_maskz_cvt_roundepu64_ps(__mmask8 __U, __m512i __A, const int __R) { return (__m256)__builtin_ia32_cvtuqq2ps512_mask( (__v8di)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U, __R); } __funline __m512d _mm512_cvt_roundepi64_pd(__m512i __A, const int __R) { return (__m512d)__builtin_ia32_cvtqq2pd512_mask( (__v8di)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)-1, __R); } __funline __m512d _mm512_mask_cvt_roundepi64_pd(__m512d __W, __mmask8 __U, __m512i __A, const int __R) { return (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)__A, (__v8df)__W, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_cvt_roundepi64_pd(__mmask8 __U, __m512i __A, const int __R) { return (__m512d)__builtin_ia32_cvtqq2pd512_mask( (__v8di)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, __R); } __funline __m512d _mm512_cvt_roundepu64_pd(__m512i __A, const int __R) { return (__m512d)__builtin_ia32_cvtuqq2pd512_mask( (__v8di)__A, 
(__v8df)_mm512_setzero_pd(), (__mmask8)-1, __R); } __funline __m512d _mm512_mask_cvt_roundepu64_pd(__m512d __W, __mmask8 __U, __m512i __A, const int __R) { return (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)__A, (__v8df)__W, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_cvt_roundepu64_pd(__mmask8 __U, __m512i __A, const int __R) { return (__m512d)__builtin_ia32_cvtuqq2pd512_mask( (__v8di)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, __R); } __funline __m512d _mm512_reduce_pd(__m512d __A, int __B) { return (__m512d)__builtin_ia32_reducepd512_mask( (__v8df)__A, __B, (__v8df)_mm512_setzero_pd(), (__mmask8)-1); } __funline __m512d _mm512_mask_reduce_pd(__m512d __W, __mmask8 __U, __m512d __A, int __B) { return (__m512d)__builtin_ia32_reducepd512_mask((__v8df)__A, __B, (__v8df)__W, (__mmask8)__U); } __funline __m512d _mm512_maskz_reduce_pd(__mmask8 __U, __m512d __A, int __B) { return (__m512d)__builtin_ia32_reducepd512_mask( (__v8df)__A, __B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U); } __funline __m512 _mm512_reduce_ps(__m512 __A, int __B) { return (__m512)__builtin_ia32_reduceps512_mask( (__v16sf)__A, __B, (__v16sf)_mm512_setzero_ps(), (__mmask16)-1); } __funline __m512 _mm512_mask_reduce_ps(__m512 __W, __mmask16 __U, __m512 __A, int __B) { return (__m512)__builtin_ia32_reduceps512_mask((__v16sf)__A, __B, (__v16sf)__W, (__mmask16)__U); } __funline __m512 _mm512_maskz_reduce_ps(__mmask16 __U, __m512 __A, int __B) { return (__m512)__builtin_ia32_reduceps512_mask( (__v16sf)__A, __B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U); } __funline __m256 _mm512_extractf32x8_ps(__m512 __A, const int __imm) { return (__m256)__builtin_ia32_extractf32x8_mask( (__v16sf)__A, __imm, (__v8sf)_mm256_setzero_ps(), (__mmask8)-1); } __funline __m256 _mm512_mask_extractf32x8_ps(__m256 __W, __mmask8 __U, __m512 __A, const int __imm) { return (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)__A, __imm, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm512_maskz_extractf32x8_ps(__mmask8 __U, __m512 __A, const int __imm) { return (__m256)__builtin_ia32_extractf32x8_mask( (__v16sf)__A, __imm, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128d _mm512_extractf64x2_pd(__m512d __A, const int __imm) { return (__m128d)__builtin_ia32_extractf64x2_512_mask( (__v8df)__A, __imm, (__v2df)_mm_setzero_pd(), (__mmask8)-1); } __funline __m128d _mm512_mask_extractf64x2_pd(__m128d __W, __mmask8 __U, __m512d __A, const int __imm) { return (__m128d)__builtin_ia32_extractf64x2_512_mask( (__v8df)__A, __imm, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm512_maskz_extractf64x2_pd(__mmask8 __U, __m512d __A, const int __imm) { return (__m128d)__builtin_ia32_extractf64x2_512_mask( (__v8df)__A, __imm, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m256i _mm512_extracti32x8_epi32(__m512i __A, const int __imm) { return (__m256i)__builtin_ia32_extracti32x8_mask( (__v16si)__A, __imm, (__v8si)_mm256_setzero_si256(), (__mmask8)-1); } __funline __m256i _mm512_mask_extracti32x8_epi32(__m256i __W, __mmask8 __U, __m512i __A, const int __imm) { return (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)__A, __imm, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm512_maskz_extracti32x8_epi32(__mmask8 __U, __m512i __A, const int __imm) { return (__m256i)__builtin_ia32_extracti32x8_mask( (__v16si)__A, __imm, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm512_extracti64x2_epi64(__m512i __A, const int __imm) { return (__m128i)__builtin_ia32_extracti64x2_512_mask( (__v8di)__A, __imm, 
(__v2di)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm512_mask_extracti64x2_epi64(__m128i __W, __mmask8 __U, __m512i __A, const int __imm) { return (__m128i)__builtin_ia32_extracti64x2_512_mask( (__v8di)__A, __imm, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm512_maskz_extracti64x2_epi64(__mmask8 __U, __m512i __A, const int __imm) { return (__m128i)__builtin_ia32_extracti64x2_512_mask( (__v8di)__A, __imm, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m512d _mm512_range_round_pd(__m512d __A, __m512d __B, int __C, const int __R) { return (__m512d)__builtin_ia32_rangepd512_mask((__v8df)__A, (__v8df)__B, __C, (__v8df)_mm512_setzero_pd(), (__mmask8)-1, __R); } __funline __m512d _mm512_mask_range_round_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B, int __C, const int __R) { return (__m512d)__builtin_ia32_rangepd512_mask( (__v8df)__A, (__v8df)__B, __C, (__v8df)__W, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_range_round_pd(__mmask8 __U, __m512d __A, __m512d __B, int __C, const int __R) { return (__m512d)__builtin_ia32_rangepd512_mask((__v8df)__A, (__v8df)__B, __C, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, __R); } __funline __m512 _mm512_range_round_ps(__m512 __A, __m512 __B, int __C, const int __R) { return (__m512)__builtin_ia32_rangeps512_mask((__v16sf)__A, (__v16sf)__B, __C, (__v16sf)_mm512_setzero_ps(), (__mmask16)-1, __R); } __funline __m512 _mm512_mask_range_round_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B, int __C, const int __R) { return (__m512)__builtin_ia32_rangeps512_mask( (__v16sf)__A, (__v16sf)__B, __C, (__v16sf)__W, (__mmask16)__U, __R); } __funline __m512 _mm512_maskz_range_round_ps(__mmask16 __U, __m512 __A, __m512 __B, int __C, const int __R) { return (__m512)__builtin_ia32_rangeps512_mask((__v16sf)__A, (__v16sf)__B, __C, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, __R); } __funline __m512i _mm512_inserti32x8(__m512i __A, __m256i __B, const int __imm) { return (__m512i)__builtin_ia32_inserti32x8_mask( (__v16si)__A, (__v8si)__B, __imm, (__v16si)_mm512_setzero_si512(), (__mmask16)-1); } __funline __m512i _mm512_mask_inserti32x8(__m512i __W, __mmask16 __U, __m512i __A, __m256i __B, const int __imm) { return (__m512i)__builtin_ia32_inserti32x8_mask( (__v16si)__A, (__v8si)__B, __imm, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_inserti32x8(__mmask16 __U, __m512i __A, __m256i __B, const int __imm) { return (__m512i)__builtin_ia32_inserti32x8_mask( (__v16si)__A, (__v8si)__B, __imm, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline __m512 _mm512_insertf32x8(__m512 __A, __m256 __B, const int __imm) { return (__m512)__builtin_ia32_insertf32x8_mask( (__v16sf)__A, (__v8sf)__B, __imm, (__v16sf)_mm512_setzero_ps(), (__mmask16)-1); } __funline __m512 _mm512_mask_insertf32x8(__m512 __W, __mmask16 __U, __m512 __A, __m256 __B, const int __imm) { return (__m512)__builtin_ia32_insertf32x8_mask( (__v16sf)__A, (__v8sf)__B, __imm, (__v16sf)__W, (__mmask16)__U); } __funline __m512 _mm512_maskz_insertf32x8(__mmask16 __U, __m512 __A, __m256 __B, const int __imm) { return (__m512)__builtin_ia32_insertf32x8_mask( (__v16sf)__A, (__v8sf)__B, __imm, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U); } __funline __m512i _mm512_inserti64x2(__m512i __A, __m128i __B, const int __imm) { return (__m512i)__builtin_ia32_inserti64x2_512_mask( (__v8di)__A, (__v2di)__B, __imm, (__v8di)_mm512_setzero_si512(), (__mmask8)-1); } __funline __m512i _mm512_mask_inserti64x2(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B, const int 
__imm) { return (__m512i)__builtin_ia32_inserti64x2_512_mask( (__v8di)__A, (__v2di)__B, __imm, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_inserti64x2(__mmask8 __U, __m512i __A, __m128i __B, const int __imm) { return (__m512i)__builtin_ia32_inserti64x2_512_mask( (__v8di)__A, (__v2di)__B, __imm, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } __funline __m512d _mm512_insertf64x2(__m512d __A, __m128d __B, const int __imm) { return (__m512d)__builtin_ia32_insertf64x2_512_mask( (__v8df)__A, (__v2df)__B, __imm, (__v8df)_mm512_setzero_pd(), (__mmask8)-1); } __funline __m512d _mm512_mask_insertf64x2(__m512d __W, __mmask8 __U, __m512d __A, __m128d __B, const int __imm) { return (__m512d)__builtin_ia32_insertf64x2_512_mask( (__v8df)__A, (__v2df)__B, __imm, (__v8df)__W, (__mmask8)__U); } __funline __m512d _mm512_maskz_insertf64x2(__mmask8 __U, __m512d __A, __m128d __B, const int __imm) { return (__m512d)__builtin_ia32_insertf64x2_512_mask( (__v8df)__A, (__v2df)__B, __imm, (__v8df)_mm512_setzero_pd(), (__mmask8)__U); } __funline __mmask8 _mm512_mask_fpclass_pd_mask(__mmask8 __U, __m512d __A, const int __imm) { return (__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)__A, __imm, __U); } __funline __mmask8 _mm512_fpclass_pd_mask(__m512d __A, const int __imm) { return (__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)__A, __imm, (__mmask8)-1); } __funline __mmask16 _mm512_mask_fpclass_ps_mask(__mmask16 __U, __m512 __A, const int __imm) { return (__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)__A, __imm, __U); } __funline __mmask16 _mm512_fpclass_ps_mask(__m512 __A, const int __imm) { return (__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)__A, __imm, (__mmask16)-1); } #else #define _kshiftli_mask8(X, Y) \ ((__mmask8)__builtin_ia32_kshiftliqi((__mmask8)(X), (__mmask8)(Y))) #define _kshiftri_mask8(X, Y) \ ((__mmask8)__builtin_ia32_kshiftriqi((__mmask8)(X), (__mmask8)(Y))) #define _mm_range_sd(A, B, C) \ ((__m128d)__builtin_ia32_rangesd128_mask_round( \ (__v2df)(__m128d)(A), (__v2df)(__m128d)(B), (int)(C), \ (__v2df)_mm_setzero_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION)) #define _mm_mask_range_sd(W, U, A, B, C) \ ((__m128d)__builtin_ia32_rangesd128_mask_round( \ (__v2df)(__m128d)(A), (__v2df)(__m128d)(B), (int)(C), \ (__v2df)(__m128d)(W), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm_maskz_range_sd(U, A, B, C) \ ((__m128d)__builtin_ia32_rangesd128_mask_round( \ (__v2df)(__m128d)(A), (__v2df)(__m128d)(B), (int)(C), \ (__v2df)_mm_setzero_pd(), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm_range_ss(A, B, C) \ ((__m128)__builtin_ia32_rangess128_mask_round( \ (__v4sf)(__m128)(A), (__v4sf)(__m128)(B), (int)(C), \ (__v4sf)_mm_setzero_ps(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION)) #define _mm_mask_range_ss(W, U, A, B, C) \ ((__m128)__builtin_ia32_rangess128_mask_round( \ (__v4sf)(__m128)(A), (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), \ (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm_maskz_range_ss(U, A, B, C) \ ((__m128)__builtin_ia32_rangess128_mask_round( \ (__v4sf)(__m128)(A), (__v4sf)(__m128)(B), (int)(C), \ (__v4sf)_mm_setzero_ps(), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm_range_round_sd(A, B, C, R) \ ((__m128d)__builtin_ia32_rangesd128_mask_round( \ (__v2df)(__m128d)(A), (__v2df)(__m128d)(B), (int)(C), \ (__v2df)_mm_setzero_pd(), (__mmask8)-1, (R))) #define _mm_mask_range_round_sd(W, U, A, B, C, R) \ ((__m128d)__builtin_ia32_rangesd128_mask_round( \ (__v2df)(__m128d)(A), (__v2df)(__m128d)(B), (int)(C), \ (__v2df)(__m128d)(W), 
(__mmask8)(U), (R))) #define _mm_maskz_range_round_sd(U, A, B, C, R) \ ((__m128d)__builtin_ia32_rangesd128_mask_round( \ (__v2df)(__m128d)(A), (__v2df)(__m128d)(B), (int)(C), \ (__v2df)_mm_setzero_pd(), (__mmask8)(U), (R))) #define _mm_range_round_ss(A, B, C, R) \ ((__m128)__builtin_ia32_rangess128_mask_round( \ (__v4sf)(__m128)(A), (__v4sf)(__m128)(B), (int)(C), \ (__v4sf)_mm_setzero_ps(), (__mmask8)-1, (R))) #define _mm_mask_range_round_ss(W, U, A, B, C, R) \ ((__m128)__builtin_ia32_rangess128_mask_round( \ (__v4sf)(__m128)(A), (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), \ (__mmask8)(U), (R))) #define _mm_maskz_range_round_ss(U, A, B, C, R) \ ((__m128)__builtin_ia32_rangess128_mask_round( \ (__v4sf)(__m128)(A), (__v4sf)(__m128)(B), (int)(C), \ (__v4sf)_mm_setzero_ps(), (__mmask8)(U), (R))) #define _mm512_cvtt_roundpd_epi64(A, B) \ ((__m512i)__builtin_ia32_cvttpd2qq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), -1, (B))) #define _mm512_mask_cvtt_roundpd_epi64(W, U, A, B) \ ((__m512i)__builtin_ia32_cvttpd2qq512_mask((A), (__v8di)(W), (U), (B))) #define _mm512_maskz_cvtt_roundpd_epi64(U, A, B) \ ((__m512i)__builtin_ia32_cvttpd2qq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), (U), (B))) #define _mm512_cvtt_roundpd_epu64(A, B) \ ((__m512i)__builtin_ia32_cvttpd2uqq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), -1, (B))) #define _mm512_mask_cvtt_roundpd_epu64(W, U, A, B) \ ((__m512i)__builtin_ia32_cvttpd2uqq512_mask((A), (__v8di)(W), (U), (B))) #define _mm512_maskz_cvtt_roundpd_epu64(U, A, B) \ ((__m512i)__builtin_ia32_cvttpd2uqq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), (U), (B))) #define _mm512_cvtt_roundps_epi64(A, B) \ ((__m512i)__builtin_ia32_cvttps2qq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), -1, (B))) #define _mm512_mask_cvtt_roundps_epi64(W, U, A, B) \ ((__m512i)__builtin_ia32_cvttps2qq512_mask((A), (__v8di)(W), (U), (B))) #define _mm512_maskz_cvtt_roundps_epi64(U, A, B) \ ((__m512i)__builtin_ia32_cvttps2qq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), (U), (B))) #define _mm512_cvtt_roundps_epu64(A, B) \ ((__m512i)__builtin_ia32_cvttps2uqq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), -1, (B))) #define _mm512_mask_cvtt_roundps_epu64(W, U, A, B) \ ((__m512i)__builtin_ia32_cvttps2uqq512_mask((A), (__v8di)(W), (U), (B))) #define _mm512_maskz_cvtt_roundps_epu64(U, A, B) \ ((__m512i)__builtin_ia32_cvttps2uqq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), (U), (B))) #define _mm512_cvt_roundpd_epi64(A, B) \ ((__m512i)__builtin_ia32_cvtpd2qq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), -1, (B))) #define _mm512_mask_cvt_roundpd_epi64(W, U, A, B) \ ((__m512i)__builtin_ia32_cvtpd2qq512_mask((A), (__v8di)(W), (U), (B))) #define _mm512_maskz_cvt_roundpd_epi64(U, A, B) \ ((__m512i)__builtin_ia32_cvtpd2qq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), (U), (B))) #define _mm512_cvt_roundpd_epu64(A, B) \ ((__m512i)__builtin_ia32_cvtpd2uqq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), -1, (B))) #define _mm512_mask_cvt_roundpd_epu64(W, U, A, B) \ ((__m512i)__builtin_ia32_cvtpd2uqq512_mask((A), (__v8di)(W), (U), (B))) #define _mm512_maskz_cvt_roundpd_epu64(U, A, B) \ ((__m512i)__builtin_ia32_cvtpd2uqq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), (U), (B))) #define _mm512_cvt_roundps_epi64(A, B) \ ((__m512i)__builtin_ia32_cvtps2qq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), -1, (B))) #define _mm512_mask_cvt_roundps_epi64(W, U, A, B) \ ((__m512i)__builtin_ia32_cvtps2qq512_mask((A), (__v8di)(W), (U), (B))) #define _mm512_maskz_cvt_roundps_epi64(U, A, B) \ 
((__m512i)__builtin_ia32_cvtps2qq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), (U), (B))) #define _mm512_cvt_roundps_epu64(A, B) \ ((__m512i)__builtin_ia32_cvtps2uqq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), -1, (B))) #define _mm512_mask_cvt_roundps_epu64(W, U, A, B) \ ((__m512i)__builtin_ia32_cvtps2uqq512_mask((A), (__v8di)(W), (U), (B))) #define _mm512_maskz_cvt_roundps_epu64(U, A, B) \ ((__m512i)__builtin_ia32_cvtps2uqq512_mask( \ (A), (__v8di)_mm512_setzero_si512(), (U), (B))) #define _mm512_cvt_roundepi64_ps(A, B) \ ((__m256)__builtin_ia32_cvtqq2ps512_mask( \ (__v8di)(A), (__v8sf)_mm256_setzero_ps(), -1, (B))) #define _mm512_mask_cvt_roundepi64_ps(W, U, A, B) \ ((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(A), (W), (U), (B))) #define _mm512_maskz_cvt_roundepi64_ps(U, A, B) \ ((__m256)__builtin_ia32_cvtqq2ps512_mask( \ (__v8di)(A), (__v8sf)_mm256_setzero_ps(), (U), (B))) #define _mm512_cvt_roundepu64_ps(A, B) \ ((__m256)__builtin_ia32_cvtuqq2ps512_mask( \ (__v8di)(A), (__v8sf)_mm256_setzero_ps(), -1, (B))) #define _mm512_mask_cvt_roundepu64_ps(W, U, A, B) \ ((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(A), (W), (U), (B))) #define _mm512_maskz_cvt_roundepu64_ps(U, A, B) \ ((__m256)__builtin_ia32_cvtuqq2ps512_mask( \ (__v8di)(A), (__v8sf)_mm256_setzero_ps(), (U), (B))) #define _mm512_cvt_roundepi64_pd(A, B) \ ((__m512d)__builtin_ia32_cvtqq2pd512_mask( \ (__v8di)(A), (__v8df)_mm512_setzero_pd(), -1, (B))) #define _mm512_mask_cvt_roundepi64_pd(W, U, A, B) \ ((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(A), (W), (U), (B))) #define _mm512_maskz_cvt_roundepi64_pd(U, A, B) \ ((__m512d)__builtin_ia32_cvtqq2pd512_mask( \ (__v8di)(A), (__v8df)_mm512_setzero_pd(), (U), (B))) #define _mm512_cvt_roundepu64_pd(A, B) \ ((__m512d)__builtin_ia32_cvtuqq2pd512_mask( \ (__v8di)(A), (__v8df)_mm512_setzero_pd(), -1, (B))) #define _mm512_mask_cvt_roundepu64_pd(W, U, A, B) \ ((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(A), (W), (U), (B))) #define _mm512_maskz_cvt_roundepu64_pd(U, A, B) \ ((__m512d)__builtin_ia32_cvtuqq2pd512_mask( \ (__v8di)(A), (__v8df)_mm512_setzero_pd(), (U), (B))) #define _mm512_reduce_pd(A, B) \ ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ (__v8df)_mm512_setzero_pd(), \ (__mmask8)-1)) #define _mm512_mask_reduce_pd(W, U, A, B) \ ((__m512d)__builtin_ia32_reducepd512_mask( \ (__v8df)(__m512d)(A), (int)(B), (__v8df)(__m512d)(W), (__mmask8)(U))) #define _mm512_maskz_reduce_pd(U, A, B) \ ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ (__v8df)_mm512_setzero_pd(), \ (__mmask8)(U))) #define _mm512_reduce_ps(A, B) \ ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ (__v16sf)_mm512_setzero_ps(), \ (__mmask16)-1)) #define _mm512_mask_reduce_ps(W, U, A, B) \ ((__m512)__builtin_ia32_reduceps512_mask( \ (__v16sf)(__m512)(A), (int)(B), (__v16sf)(__m512)(W), (__mmask16)(U))) #define _mm512_maskz_reduce_ps(U, A, B) \ ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ (__v16sf)_mm512_setzero_ps(), \ (__mmask16)(U))) #define _mm512_extractf32x8_ps(X, C) \ ((__m256)__builtin_ia32_extractf32x8_mask( \ (__v16sf)(__m512)(X), (int)(C), (__v8sf)(__m256)_mm256_setzero_ps(), \ (__mmask8)-1)) #define _mm512_mask_extractf32x8_ps(W, U, X, C) \ ((__m256)__builtin_ia32_extractf32x8_mask( \ (__v16sf)(__m512)(X), (int)(C), (__v8sf)(__m256)(W), (__mmask8)(U))) #define _mm512_maskz_extractf32x8_ps(U, X, C) \ ((__m256)__builtin_ia32_extractf32x8_mask( \ (__v16sf)(__m512)(X), (int)(C), 
(__v8sf)(__m256)_mm256_setzero_ps(), \ (__mmask8)(U))) #define _mm512_extractf64x2_pd(X, C) \ ((__m128d)__builtin_ia32_extractf64x2_512_mask( \ (__v8df)(__m512d)(X), (int)(C), (__v2df)(__m128d)_mm_setzero_pd(), \ (__mmask8)-1)) #define _mm512_mask_extractf64x2_pd(W, U, X, C) \ ((__m128d)__builtin_ia32_extractf64x2_512_mask( \ (__v8df)(__m512d)(X), (int)(C), (__v2df)(__m128d)(W), (__mmask8)(U))) #define _mm512_maskz_extractf64x2_pd(U, X, C) \ ((__m128d)__builtin_ia32_extractf64x2_512_mask( \ (__v8df)(__m512d)(X), (int)(C), (__v2df)(__m128d)_mm_setzero_pd(), \ (__mmask8)(U))) #define _mm512_extracti32x8_epi32(X, C) \ ((__m256i)__builtin_ia32_extracti32x8_mask( \ (__v16si)(__m512i)(X), (int)(C), \ (__v8si)(__m256i)_mm256_setzero_si256(), (__mmask8)-1)) #define _mm512_mask_extracti32x8_epi32(W, U, X, C) \ ((__m256i)__builtin_ia32_extracti32x8_mask( \ (__v16si)(__m512i)(X), (int)(C), (__v8si)(__m256i)(W), (__mmask8)(U))) #define _mm512_maskz_extracti32x8_epi32(U, X, C) \ ((__m256i)__builtin_ia32_extracti32x8_mask( \ (__v16si)(__m512i)(X), (int)(C), \ (__v8si)(__m256i)_mm256_setzero_si256(), (__mmask8)(U))) #define _mm512_extracti64x2_epi64(X, C) \ ((__m128i)__builtin_ia32_extracti64x2_512_mask( \ (__v8di)(__m512i)(X), (int)(C), (__v2di)(__m128i)_mm_setzero_si128(), \ (__mmask8)-1)) #define _mm512_mask_extracti64x2_epi64(W, U, X, C) \ ((__m128i)__builtin_ia32_extracti64x2_512_mask( \ (__v8di)(__m512i)(X), (int)(C), (__v2di)(__m128i)(W), (__mmask8)(U))) #define _mm512_maskz_extracti64x2_epi64(U, X, C) \ ((__m128i)__builtin_ia32_extracti64x2_512_mask( \ (__v8di)(__m512i)(X), (int)(C), (__v2di)(__m128i)_mm_setzero_si128(), \ (__mmask8)(U))) #define _mm512_range_pd(A, B, C) \ ((__m512d)__builtin_ia32_rangepd512_mask( \ (__v8df)(__m512d)(A), (__v8df)(__m512d)(B), (int)(C), \ (__v8df)_mm512_setzero_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION)) #define _mm512_mask_range_pd(W, U, A, B, C) \ ((__m512d)__builtin_ia32_rangepd512_mask( \ (__v8df)(__m512d)(A), (__v8df)(__m512d)(B), (int)(C), \ (__v8df)(__m512d)(W), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm512_maskz_range_pd(U, A, B, C) \ ((__m512d)__builtin_ia32_rangepd512_mask( \ (__v8df)(__m512d)(A), (__v8df)(__m512d)(B), (int)(C), \ (__v8df)_mm512_setzero_pd(), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm512_range_ps(A, B, C) \ ((__m512)__builtin_ia32_rangeps512_mask( \ (__v16sf)(__m512)(A), (__v16sf)(__m512)(B), (int)(C), \ (__v16sf)_mm512_setzero_ps(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION)) #define _mm512_mask_range_ps(W, U, A, B, C) \ ((__m512)__builtin_ia32_rangeps512_mask( \ (__v16sf)(__m512)(A), (__v16sf)(__m512)(B), (int)(C), \ (__v16sf)(__m512)(W), (__mmask16)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm512_maskz_range_ps(U, A, B, C) \ ((__m512)__builtin_ia32_rangeps512_mask( \ (__v16sf)(__m512)(A), (__v16sf)(__m512)(B), (int)(C), \ (__v16sf)_mm512_setzero_ps(), (__mmask16)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm512_range_round_pd(A, B, C, R) \ ((__m512d)__builtin_ia32_rangepd512_mask( \ (__v8df)(__m512d)(A), (__v8df)(__m512d)(B), (int)(C), \ (__v8df)_mm512_setzero_pd(), (__mmask8)-1, (R))) #define _mm512_mask_range_round_pd(W, U, A, B, C, R) \ ((__m512d)__builtin_ia32_rangepd512_mask( \ (__v8df)(__m512d)(A), (__v8df)(__m512d)(B), (int)(C), \ (__v8df)(__m512d)(W), (__mmask8)(U), (R))) #define _mm512_maskz_range_round_pd(U, A, B, C, R) \ ((__m512d)__builtin_ia32_rangepd512_mask( \ (__v8df)(__m512d)(A), (__v8df)(__m512d)(B), (int)(C), \ (__v8df)_mm512_setzero_pd(), (__mmask8)(U), (R))) #define _mm512_range_round_ps(A, B, C, 
R) \ ((__m512)__builtin_ia32_rangeps512_mask( \ (__v16sf)(__m512)(A), (__v16sf)(__m512)(B), (int)(C), \ (__v16sf)_mm512_setzero_ps(), (__mmask16)-1, (R))) #define _mm512_mask_range_round_ps(W, U, A, B, C, R) \ ((__m512)__builtin_ia32_rangeps512_mask( \ (__v16sf)(__m512)(A), (__v16sf)(__m512)(B), (int)(C), \ (__v16sf)(__m512)(W), (__mmask16)(U), (R))) #define _mm512_maskz_range_round_ps(U, A, B, C, R) \ ((__m512)__builtin_ia32_rangeps512_mask( \ (__v16sf)(__m512)(A), (__v16sf)(__m512)(B), (int)(C), \ (__v16sf)_mm512_setzero_ps(), (__mmask16)(U), (R))) #define _mm512_insertf64x2(X, Y, C) \ ((__m512d)__builtin_ia32_insertf64x2_512_mask( \ (__v8df)(__m512d)(X), (__v2df)(__m128d)(Y), (int)(C), \ (__v8df)(__m512d)(X), (__mmask8)-1)) #define _mm512_mask_insertf64x2(W, U, X, Y, C) \ ((__m512d)__builtin_ia32_insertf64x2_512_mask( \ (__v8df)(__m512d)(X), (__v2df)(__m128d)(Y), (int)(C), \ (__v8df)(__m512d)(W), (__mmask8)(U))) #define _mm512_maskz_insertf64x2(U, X, Y, C) \ ((__m512d)__builtin_ia32_insertf64x2_512_mask( \ (__v8df)(__m512d)(X), (__v2df)(__m128d)(Y), (int)(C), \ (__v8df)(__m512d)_mm512_setzero_pd(), (__mmask8)(U))) #define _mm512_inserti64x2(X, Y, C) \ ((__m512i)__builtin_ia32_inserti64x2_512_mask( \ (__v8di)(__m512i)(X), (__v2di)(__m128i)(Y), (int)(C), \ (__v8di)(__m512i)(X), (__mmask8)-1)) #define _mm512_mask_inserti64x2(W, U, X, Y, C) \ ((__m512i)__builtin_ia32_inserti64x2_512_mask( \ (__v8di)(__m512i)(X), (__v2di)(__m128i)(Y), (int)(C), \ (__v8di)(__m512i)(W), (__mmask8)(U))) #define _mm512_maskz_inserti64x2(U, X, Y, C) \ ((__m512i)__builtin_ia32_inserti64x2_512_mask( \ (__v8di)(__m512i)(X), (__v2di)(__m128i)(Y), (int)(C), \ (__v8di)(__m512i)_mm512_setzero_si512(), (__mmask8)(U))) #define _mm512_insertf32x8(X, Y, C) \ ((__m512)__builtin_ia32_insertf32x8_mask( \ (__v16sf)(__m512)(X), (__v8sf)(__m256)(Y), (int)(C), \ (__v16sf)(__m512)_mm512_setzero_ps(), (__mmask16)-1)) #define _mm512_mask_insertf32x8(W, U, X, Y, C) \ ((__m512)__builtin_ia32_insertf32x8_mask( \ (__v16sf)(__m512)(X), (__v8sf)(__m256)(Y), (int)(C), \ (__v16sf)(__m512)(W), (__mmask16)(U))) #define _mm512_maskz_insertf32x8(U, X, Y, C) \ ((__m512)__builtin_ia32_insertf32x8_mask( \ (__v16sf)(__m512)(X), (__v8sf)(__m256)(Y), (int)(C), \ (__v16sf)(__m512)_mm512_setzero_ps(), (__mmask16)(U))) #define _mm512_inserti32x8(X, Y, C) \ ((__m512i)__builtin_ia32_inserti32x8_mask( \ (__v16si)(__m512i)(X), (__v8si)(__m256i)(Y), (int)(C), \ (__v16si)(__m512i)_mm512_setzero_si512(), (__mmask16)-1)) #define _mm512_mask_inserti32x8(W, U, X, Y, C) \ ((__m512i)__builtin_ia32_inserti32x8_mask( \ (__v16si)(__m512i)(X), (__v8si)(__m256i)(Y), (int)(C), \ (__v16si)(__m512i)(W), (__mmask16)(U))) #define _mm512_maskz_inserti32x8(U, X, Y, C) \ ((__m512i)__builtin_ia32_inserti32x8_mask( \ (__v16si)(__m512i)(X), (__v8si)(__m256i)(Y), (int)(C), \ (__v16si)(__m512i)_mm512_setzero_si512(), (__mmask16)(U))) #define _mm_fpclass_ss_mask(X, C) \ ((__mmask8)__builtin_ia32_fpclassss((__v4sf)(__m128)(X), (int)(C))) #define _mm_fpclass_sd_mask(X, C) \ ((__mmask8)__builtin_ia32_fpclasssd((__v2df)(__m128d)(X), (int)(C))) #define _mm512_mask_fpclass_pd_mask(u, X, C) \ ((__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(X), (int)(C), \ (__mmask8)(u))) #define _mm512_mask_fpclass_ps_mask(u, x, c) \ ((__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(x), (int)(c), \ (__mmask8)(u))) #define _mm512_fpclass_pd_mask(X, C) \ ((__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(X), (int)(C), \ (__mmask8)-1)) #define _mm512_fpclass_ps_mask(x, c) \ 
((__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(x), (int)(c), \ (__mmask8)-1)) #define _mm_reduce_sd(A, B, C) \ ((__m128d)__builtin_ia32_reducesd_mask( \ (__v2df)(__m128d)(A), (__v2df)(__m128d)(B), (int)(C), \ (__v2df)_mm_setzero_pd(), (__mmask8)-1)) #define _mm_mask_reduce_sd(W, U, A, B, C) \ ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ (__v2df)(__m128d)(B), (int)(C), \ (__v2df)(__m128d)(W), (__mmask8)(U))) #define _mm_maskz_reduce_sd(U, A, B, C) \ ((__m128d)__builtin_ia32_reducesd_mask( \ (__v2df)(__m128d)(A), (__v2df)(__m128d)(B), (int)(C), \ (__v2df)_mm_setzero_pd(), (__mmask8)(U))) #define _mm_reduce_ss(A, B, C) \ ((__m128)__builtin_ia32_reducess_mask( \ (__v4sf)(__m128)(A), (__v4sf)(__m128)(B), (int)(C), \ (__v4sf)_mm_setzero_ps(), (__mmask8)-1)) #define _mm_mask_reduce_ss(W, U, A, B, C) \ ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ (__v4sf)(__m128)(B), (int)(C), \ (__v4sf)(__m128)(W), (__mmask8)(U))) #define _mm_maskz_reduce_ss(U, A, B, C) \ ((__m128)__builtin_ia32_reducess_mask( \ (__v4sf)(__m128)(A), (__v4sf)(__m128)(B), (int)(C), \ (__v4sf)_mm_setzero_ps(), (__mmask8)(U))) #endif #ifdef __DISABLE_AVX512DQ__ #undef __DISABLE_AVX512DQ__ #pragma GCC pop_options #endif /* __DISABLE_AVX512DQ__ */ #endif /* _AVX512DQINTRIN_H_INCLUDED */
70,186
1,648
jart/cosmopolitan
false
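Usage note: a minimal sketch of the fpclass macros above, assuming a toolchain that exposes these AVX512DQ intrinsics through immintrin.h and is compiled with -mavx512dq; main and the 0x18 immediate (FPCLASS imm8 bits 3 and 4, +inf and -inf) are illustrative, not part of the header.

#include <immintrin.h>
#include <math.h>
#include <stdio.h>

int main(void) {
  /* lanes e7..e0; lanes 5 and 3 are infinite */
  __m512d v = _mm512_set_pd(1.0, -0.0, INFINITY, 2.5, -INFINITY, 3.0, 4.0, 5.0);
  __mmask8 m = _mm512_fpclass_pd_mask(v, 0x18); /* classify +inf | -inf */
  printf("infinite lanes: 0x%02x\n", (unsigned)m); /* expect 0x28 */
  return 0;
}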
cosmopolitan/third_party/intel/avx5124vnniwintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED #error \ "Never use <avx5124vnniwintrin.h> directly; include <x86intrin.h> instead." #endif #ifndef _AVX5124VNNIWINTRIN_H_INCLUDED #define _AVX5124VNNIWINTRIN_H_INCLUDED #ifndef __AVX5124VNNIW__ #pragma GCC push_options #pragma GCC target("avx5124vnniw") #define __DISABLE_AVX5124VNNIW__ #endif /* __AVX5124VNNIW__ */ __funline __m512i _mm512_4dpwssd_epi32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m128i *__F) { return (__m512i)__builtin_ia32_vp4dpwssd((__v16si)__B, (__v16si)__C, (__v16si)__D, (__v16si)__E, (__v16si)__A, (const __v4si *)__F); } __funline __m512i _mm512_mask_4dpwssd_epi32(__m512i __A, __mmask16 __U, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m128i *__F) { return (__m512i)__builtin_ia32_vp4dpwssd_mask( (__v16si)__B, (__v16si)__C, (__v16si)__D, (__v16si)__E, (__v16si)__A, (const __v4si *)__F, (__v16si)__A, (__mmask16)__U); } __funline __m512i _mm512_maskz_4dpwssd_epi32(__mmask16 __U, __m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m128i *__F) { return (__m512i)__builtin_ia32_vp4dpwssd_mask( (__v16si)__B, (__v16si)__C, (__v16si)__D, (__v16si)__E, (__v16si)__A, (const __v4si *)__F, (__v16si)_mm512_setzero_ps(), (__mmask16)__U); } __funline __m512i _mm512_4dpwssds_epi32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m128i *__F) { return (__m512i)__builtin_ia32_vp4dpwssds((__v16si)__B, (__v16si)__C, (__v16si)__D, (__v16si)__E, (__v16si)__A, (const __v4si *)__F); } __funline __m512i _mm512_mask_4dpwssds_epi32(__m512i __A, __mmask16 __U, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m128i *__F) { return (__m512i)__builtin_ia32_vp4dpwssds_mask( (__v16si)__B, (__v16si)__C, (__v16si)__D, (__v16si)__E, (__v16si)__A, (const __v4si *)__F, (__v16si)__A, (__mmask16)__U); } __funline __m512i _mm512_maskz_4dpwssds_epi32(__mmask16 __U, __m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m128i *__F) { return (__m512i)__builtin_ia32_vp4dpwssds_mask( (__v16si)__B, (__v16si)__C, (__v16si)__D, (__v16si)__E, (__v16si)__A, (const __v4si *)__F, (__v16si)_mm512_setzero_ps(), (__mmask16)__U); } #ifdef __DISABLE_AVX5124VNNIW__ #undef __DISABLE_AVX5124VNNIW__ #pragma GCC pop_options #endif /* __DISABLE_AVX5124VNNIW__ */ #endif /* _AVX5124VNNIWINTRIN_H_INCLUDED */
3,114
70
jart/cosmopolitan
false
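A compile-only sketch of the chained dot-product form above; AVX5124VNNIW shipped only on Knights Mill parts, so this assumes -mavx5124vnniw, and dot4 is an illustrative name rather than a real API.

#include <immintrin.h>

/* acc[i] += four chained signed 16-bit dot products; the shared 128-bit
   operand is taken through a pointer, matching the builtin's signature */
__m512i dot4(__m512i acc, __m512i b, __m512i c, __m512i d, __m512i e,
             __m128i w) {
  return _mm512_4dpwssd_epi32(acc, b, c, d, e, &w);
}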
cosmopolitan/third_party/intel/popcntintrin.internal.h
#ifndef _POPCNTINTRIN_H_INCLUDED #define _POPCNTINTRIN_H_INCLUDED #ifdef __x86_64__ #ifndef __POPCNT__ #pragma GCC push_options #pragma GCC target("popcnt") #define __DISABLE_POPCNT__ #endif /* __POPCNT__ */ __funline int _mm_popcnt_u32(unsigned int __X) { return __builtin_popcount(__X); } #ifdef __x86_64__ __funline long long _mm_popcnt_u64(unsigned long long __X) { return __builtin_popcountll(__X); } #endif #ifdef __DISABLE_POPCNT__ #undef __DISABLE_POPCNT__ #pragma GCC pop_options #endif /* __DISABLE_POPCNT__ */ #endif /* __x86_64__ */ #endif /* _POPCNTINTRIN_H_INCLUDED */
592
28
jart/cosmopolitan
false
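A tiny sketch of the two wrappers above, assuming immintrin.h and -mpopcnt (count_bits is illustrative):

#include <immintrin.h>

int count_bits(unsigned x, unsigned long long y) {
  /* hardware POPCNT at both operand widths */
  return _mm_popcnt_u32(x) + (int)_mm_popcnt_u64(y);
}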
cosmopolitan/third_party/intel/rdseedintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED #error "Never use <rdseedintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _RDSEEDINTRIN_H_INCLUDED #define _RDSEEDINTRIN_H_INCLUDED #ifndef __RDSEED__ #pragma GCC push_options #pragma GCC target("rdseed") #define __DISABLE_RDSEED__ #endif /* __RDSEED__ */ __funline int _rdseed16_step(unsigned short *__p) { return __builtin_ia32_rdseed_hi_step(__p); } __funline int _rdseed32_step(unsigned int *__p) { return __builtin_ia32_rdseed_si_step(__p); } #ifdef __x86_64__ __funline int _rdseed64_step(unsigned long long *__p) { return __builtin_ia32_rdseed_di_step(__p); } #endif #ifdef __DISABLE_RDSEED__ #undef __DISABLE_RDSEED__ #pragma GCC pop_options #endif /* __DISABLE_RDSEED__ */ #endif /* _RDSEEDINTRIN_H_INCLUDED */
786
34
jart/cosmopolitan
false
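The step functions return 1 on success and 0 when the entropy source is momentarily drained, so callers typically retry; a sketch assuming -mrdseed (rdseed64_retry is an illustrative helper):

#include <immintrin.h>

int rdseed64_retry(unsigned long long *out, int tries) {
  while (tries-- > 0)
    if (_rdseed64_step(out)) return 1; /* CF=1: *out holds a fresh seed */
  return 0;                            /* exhausted; caller should back off */
}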
cosmopolitan/third_party/intel/avx512ifmavlintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #error \ "Never use <avx512ifmavlintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _AVX512IFMAVLINTRIN_H_INCLUDED #define _AVX512IFMAVLINTRIN_H_INCLUDED #if !defined(__AVX512VL__) || !defined(__AVX512IFMA__) #pragma GCC push_options #pragma GCC target("avx512ifma,avx512vl") #define __DISABLE_AVX512IFMAVL__ #endif /* __AVX512IFMAVL__ */ __funline __m128i _mm_madd52lo_epu64(__m128i __X, __m128i __Y, __m128i __Z) { return (__m128i)__builtin_ia32_vpmadd52luq128_mask((__v2di)__X, (__v2di)__Y, (__v2di)__Z, (__mmask8)-1); } __funline __m128i _mm_madd52hi_epu64(__m128i __X, __m128i __Y, __m128i __Z) { return (__m128i)__builtin_ia32_vpmadd52huq128_mask((__v2di)__X, (__v2di)__Y, (__v2di)__Z, (__mmask8)-1); } __funline __m256i _mm256_madd52lo_epu64(__m256i __X, __m256i __Y, __m256i __Z) { return (__m256i)__builtin_ia32_vpmadd52luq256_mask((__v4di)__X, (__v4di)__Y, (__v4di)__Z, (__mmask8)-1); } __funline __m256i _mm256_madd52hi_epu64(__m256i __X, __m256i __Y, __m256i __Z) { return (__m256i)__builtin_ia32_vpmadd52huq256_mask((__v4di)__X, (__v4di)__Y, (__v4di)__Z, (__mmask8)-1); } __funline __m128i _mm_mask_madd52lo_epu64(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_vpmadd52luq128_mask( (__v2di)__W, (__v2di)__X, (__v2di)__Y, (__mmask8)__M); } __funline __m128i _mm_mask_madd52hi_epu64(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_vpmadd52huq128_mask( (__v2di)__W, (__v2di)__X, (__v2di)__Y, (__mmask8)__M); } __funline __m256i _mm256_mask_madd52lo_epu64(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_vpmadd52luq256_mask( (__v4di)__W, (__v4di)__X, (__v4di)__Y, (__mmask8)__M); } __funline __m256i _mm256_mask_madd52hi_epu64(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_vpmadd52huq256_mask( (__v4di)__W, (__v4di)__X, (__v4di)__Y, (__mmask8)__M); } __funline __m128i _mm_maskz_madd52lo_epu64(__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) { return (__m128i)__builtin_ia32_vpmadd52luq128_maskz( (__v2di)__X, (__v2di)__Y, (__v2di)__Z, (__mmask8)__M); } __funline __m128i _mm_maskz_madd52hi_epu64(__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) { return (__m128i)__builtin_ia32_vpmadd52huq128_maskz( (__v2di)__X, (__v2di)__Y, (__v2di)__Z, (__mmask8)__M); } __funline __m256i _mm256_maskz_madd52lo_epu64(__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) { return (__m256i)__builtin_ia32_vpmadd52luq256_maskz( (__v4di)__X, (__v4di)__Y, (__v4di)__Z, (__mmask8)__M); } __funline __m256i _mm256_maskz_madd52hi_epu64(__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) { return (__m256i)__builtin_ia32_vpmadd52huq256_maskz( (__v4di)__X, (__v4di)__Y, (__v4di)__Z, (__mmask8)__M); } #ifdef __DISABLE_AVX512IFMAVL__ #undef __DISABLE_AVX512IFMAVL__ #pragma GCC pop_options #endif /* __DISABLE_AVX512IFMAVL__ */ #endif /* _AVX512IFMAVLINTRIN_H_INCLUDED */
3,609
89
jart/cosmopolitan
false
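A sketch of the 52-bit multiply-add above, the shape used by wide-limb bignum kernels; assumes -mavx512ifma -mavx512vl (fma52 is an illustrative name):

#include <immintrin.h>

/* per 64-bit lane: acc + low 52 bits of x*y, inputs treated as 52-bit limbs */
__m256i fma52(__m256i acc, __m256i x, __m256i y) {
  return _mm256_madd52lo_epu64(acc, x, y);
}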
cosmopolitan/third_party/intel/mmintrin.internal.h
#ifndef _MMINTRIN_H_INCLUDED #define _MMINTRIN_H_INCLUDED #ifdef __x86_64__ #if defined __x86_64__ && !defined __SSE__ || !defined __MMX__ #pragma GCC push_options #ifdef __x86_64__ #pragma GCC target("sse,mmx") #else #pragma GCC target("mmx") #endif #define __DISABLE_MMX__ #endif /* __MMX__ */ typedef int __m64 __attribute__((__vector_size__(8), __may_alias__)); typedef int __m64_u __attribute__((__vector_size__(8), __may_alias__, __aligned__(1))); typedef int __v2si __attribute__((__vector_size__(8))); typedef short __v4hi __attribute__((__vector_size__(8))); typedef char __v8qi __attribute__((__vector_size__(8))); typedef long long __v1di __attribute__((__vector_size__(8))); typedef float __v2sf __attribute__((__vector_size__(8))); __funline void _mm_empty(void) { __builtin_ia32_emms(); } __funline void _m_empty(void) { _mm_empty(); } __funline __m64 _mm_cvtsi32_si64(int __i) { return (__m64)__builtin_ia32_vec_init_v2si(__i, 0); } __funline __m64 _m_from_int(int __i) { return _mm_cvtsi32_si64(__i); } #ifdef __x86_64__ __funline __m64 _m_from_int64(long long __i) { return (__m64)__i; } __funline __m64 _mm_cvtsi64_m64(long long __i) { return (__m64)__i; } __funline __m64 _mm_cvtsi64x_si64(long long __i) { return (__m64)__i; } __funline __m64 _mm_set_pi64x(long long __i) { return (__m64)__i; } #endif __funline int _mm_cvtsi64_si32(__m64 __i) { return __builtin_ia32_vec_ext_v2si((__v2si)__i, 0); } __funline int _m_to_int(__m64 __i) { return _mm_cvtsi64_si32(__i); } #ifdef __x86_64__ __funline long long _m_to_int64(__m64 __i) { return (long long)__i; } __funline long long _mm_cvtm64_si64(__m64 __i) { return (long long)__i; } __funline long long _mm_cvtsi64_si64x(__m64 __i) { return (long long)__i; } #endif __funline __m64 _mm_packs_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2); } __funline __m64 _m_packsswb(__m64 __m1, __m64 __m2) { return _mm_packs_pi16(__m1, __m2); } __funline __m64 _mm_packs_pi32(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2); } __funline __m64 _m_packssdw(__m64 __m1, __m64 __m2) { return _mm_packs_pi32(__m1, __m2); } __funline __m64 _mm_packs_pu16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_packuswb((__v4hi)__m1, (__v4hi)__m2); } __funline __m64 _m_packuswb(__m64 __m1, __m64 __m2) { return _mm_packs_pu16(__m1, __m2); } __funline __m64 _mm_unpackhi_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_punpckhbw((__v8qi)__m1, (__v8qi)__m2); } __funline __m64 _m_punpckhbw(__m64 __m1, __m64 __m2) { return _mm_unpackhi_pi8(__m1, __m2); } __funline __m64 _mm_unpackhi_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_punpckhwd((__v4hi)__m1, (__v4hi)__m2); } __funline __m64 _m_punpckhwd(__m64 __m1, __m64 __m2) { return _mm_unpackhi_pi16(__m1, __m2); } __funline __m64 _mm_unpackhi_pi32(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_punpckhdq((__v2si)__m1, (__v2si)__m2); } __funline __m64 _m_punpckhdq(__m64 __m1, __m64 __m2) { return _mm_unpackhi_pi32(__m1, __m2); } __funline __m64 _mm_unpacklo_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_punpcklbw((__v8qi)__m1, (__v8qi)__m2); } __funline __m64 _m_punpcklbw(__m64 __m1, __m64 __m2) { return _mm_unpacklo_pi8(__m1, __m2); } __funline __m64 _mm_unpacklo_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_punpcklwd((__v4hi)__m1, (__v4hi)__m2); } __funline __m64 _m_punpcklwd(__m64 __m1, __m64 __m2) { return _mm_unpacklo_pi16(__m1, __m2); } __funline __m64 _mm_unpacklo_pi32(__m64 __m1, 
__m64 __m2) { return (__m64)__builtin_ia32_punpckldq((__v2si)__m1, (__v2si)__m2); } __funline __m64 _m_punpckldq(__m64 __m1, __m64 __m2) { return _mm_unpacklo_pi32(__m1, __m2); } __funline __m64 _mm_add_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_paddb((__v8qi)__m1, (__v8qi)__m2); } __funline __m64 _m_paddb(__m64 __m1, __m64 __m2) { return _mm_add_pi8(__m1, __m2); } __funline __m64 _mm_add_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_paddw((__v4hi)__m1, (__v4hi)__m2); } __funline __m64 _m_paddw(__m64 __m1, __m64 __m2) { return _mm_add_pi16(__m1, __m2); } __funline __m64 _mm_add_pi32(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2); } __funline __m64 _m_paddd(__m64 __m1, __m64 __m2) { return _mm_add_pi32(__m1, __m2); } #ifndef __SSE2__ #pragma GCC push_options #pragma GCC target("sse2,mmx") #define __DISABLE_SSE2__ #endif /* __SSE2__ */ __funline __m64 _mm_add_si64(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_paddq((__v1di)__m1, (__v1di)__m2); } #ifdef __DISABLE_SSE2__ #undef __DISABLE_SSE2__ #pragma GCC pop_options #endif /* __DISABLE_SSE2__ */ __funline __m64 _mm_adds_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2); } __funline __m64 _m_paddsb(__m64 __m1, __m64 __m2) { return _mm_adds_pi8(__m1, __m2); } __funline __m64 _mm_adds_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2); } __funline __m64 _m_paddsw(__m64 __m1, __m64 __m2) { return _mm_adds_pi16(__m1, __m2); } __funline __m64 _mm_adds_pu8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2); } __funline __m64 _m_paddusb(__m64 __m1, __m64 __m2) { return _mm_adds_pu8(__m1, __m2); } __funline __m64 _mm_adds_pu16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2); } __funline __m64 _m_paddusw(__m64 __m1, __m64 __m2) { return _mm_adds_pu16(__m1, __m2); } __funline __m64 _mm_sub_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_psubb((__v8qi)__m1, (__v8qi)__m2); } __funline __m64 _m_psubb(__m64 __m1, __m64 __m2) { return _mm_sub_pi8(__m1, __m2); } __funline __m64 _mm_sub_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_psubw((__v4hi)__m1, (__v4hi)__m2); } __funline __m64 _m_psubw(__m64 __m1, __m64 __m2) { return _mm_sub_pi16(__m1, __m2); } __funline __m64 _mm_sub_pi32(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2); } __funline __m64 _m_psubd(__m64 __m1, __m64 __m2) { return _mm_sub_pi32(__m1, __m2); } #ifndef __SSE2__ #pragma GCC push_options #pragma GCC target("sse2,mmx") #define __DISABLE_SSE2__ #endif /* __SSE2__ */ __funline __m64 _mm_sub_si64(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_psubq((__v1di)__m1, (__v1di)__m2); } #ifdef __DISABLE_SSE2__ #undef __DISABLE_SSE2__ #pragma GCC pop_options #endif /* __DISABLE_SSE2__ */ __funline __m64 _mm_subs_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2); } __funline __m64 _m_psubsb(__m64 __m1, __m64 __m2) { return _mm_subs_pi8(__m1, __m2); } __funline __m64 _mm_subs_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_psubsw((__v4hi)__m1, (__v4hi)__m2); } __funline __m64 _m_psubsw(__m64 __m1, __m64 __m2) { return _mm_subs_pi16(__m1, __m2); } __funline __m64 _mm_subs_pu8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2); } __funline __m64 _m_psubusb(__m64 __m1, __m64 __m2) { return 
_mm_subs_pu8(__m1, __m2); } __funline __m64 _mm_subs_pu16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_psubusw((__v4hi)__m1, (__v4hi)__m2); } __funline __m64 _m_psubusw(__m64 __m1, __m64 __m2) { return _mm_subs_pu16(__m1, __m2); } __funline __m64 _mm_madd_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pmaddwd((__v4hi)__m1, (__v4hi)__m2); } __funline __m64 _m_pmaddwd(__m64 __m1, __m64 __m2) { return _mm_madd_pi16(__m1, __m2); } __funline __m64 _mm_mulhi_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2); } __funline __m64 _m_pmulhw(__m64 __m1, __m64 __m2) { return _mm_mulhi_pi16(__m1, __m2); } __funline __m64 _mm_mullo_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pmullw((__v4hi)__m1, (__v4hi)__m2); } __funline __m64 _m_pmullw(__m64 __m1, __m64 __m2) { return _mm_mullo_pi16(__m1, __m2); } __funline __m64 _mm_sll_pi16(__m64 __m, __m64 __count) { return (__m64)__builtin_ia32_psllw((__v4hi)__m, (__v4hi)__count); } __funline __m64 _m_psllw(__m64 __m, __m64 __count) { return _mm_sll_pi16(__m, __count); } __funline __m64 _mm_slli_pi16(__m64 __m, int __count) { return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count); } __funline __m64 _m_psllwi(__m64 __m, int __count) { return _mm_slli_pi16(__m, __count); } __funline __m64 _mm_sll_pi32(__m64 __m, __m64 __count) { return (__m64)__builtin_ia32_pslld((__v2si)__m, (__v2si)__count); } __funline __m64 _m_pslld(__m64 __m, __m64 __count) { return _mm_sll_pi32(__m, __count); } __funline __m64 _mm_slli_pi32(__m64 __m, int __count) { return (__m64)__builtin_ia32_pslldi((__v2si)__m, __count); } __funline __m64 _m_pslldi(__m64 __m, int __count) { return _mm_slli_pi32(__m, __count); } __funline __m64 _mm_sll_si64(__m64 __m, __m64 __count) { return (__m64)__builtin_ia32_psllq((__v1di)__m, (__v1di)__count); } __funline __m64 _m_psllq(__m64 __m, __m64 __count) { return _mm_sll_si64(__m, __count); } __funline __m64 _mm_slli_si64(__m64 __m, int __count) { return (__m64)__builtin_ia32_psllqi((__v1di)__m, __count); } __funline __m64 _m_psllqi(__m64 __m, int __count) { return _mm_slli_si64(__m, __count); } __funline __m64 _mm_sra_pi16(__m64 __m, __m64 __count) { return (__m64)__builtin_ia32_psraw((__v4hi)__m, (__v4hi)__count); } __funline __m64 _m_psraw(__m64 __m, __m64 __count) { return _mm_sra_pi16(__m, __count); } __funline __m64 _mm_srai_pi16(__m64 __m, int __count) { return (__m64)__builtin_ia32_psrawi((__v4hi)__m, __count); } __funline __m64 _m_psrawi(__m64 __m, int __count) { return _mm_srai_pi16(__m, __count); } __funline __m64 _mm_sra_pi32(__m64 __m, __m64 __count) { return (__m64)__builtin_ia32_psrad((__v2si)__m, (__v2si)__count); } __funline __m64 _m_psrad(__m64 __m, __m64 __count) { return _mm_sra_pi32(__m, __count); } __funline __m64 _mm_srai_pi32(__m64 __m, int __count) { return (__m64)__builtin_ia32_psradi((__v2si)__m, __count); } __funline __m64 _m_psradi(__m64 __m, int __count) { return _mm_srai_pi32(__m, __count); } __funline __m64 _mm_srl_pi16(__m64 __m, __m64 __count) { return (__m64)__builtin_ia32_psrlw((__v4hi)__m, (__v4hi)__count); } __funline __m64 _m_psrlw(__m64 __m, __m64 __count) { return _mm_srl_pi16(__m, __count); } __funline __m64 _mm_srli_pi16(__m64 __m, int __count) { return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count); } __funline __m64 _m_psrlwi(__m64 __m, int __count) { return _mm_srli_pi16(__m, __count); } __funline __m64 _mm_srl_pi32(__m64 __m, __m64 __count) { return (__m64)__builtin_ia32_psrld((__v2si)__m, (__v2si)__count); } __funline __m64 
_m_psrld(__m64 __m, __m64 __count) { return _mm_srl_pi32(__m, __count); } __funline __m64 _mm_srli_pi32(__m64 __m, int __count) { return (__m64)__builtin_ia32_psrldi((__v2si)__m, __count); } __funline __m64 _m_psrldi(__m64 __m, int __count) { return _mm_srli_pi32(__m, __count); } __funline __m64 _mm_srl_si64(__m64 __m, __m64 __count) { return (__m64)__builtin_ia32_psrlq((__v1di)__m, (__v1di)__count); } __funline __m64 _m_psrlq(__m64 __m, __m64 __count) { return _mm_srl_si64(__m, __count); } __funline __m64 _mm_srli_si64(__m64 __m, int __count) { return (__m64)__builtin_ia32_psrlqi((__v1di)__m, __count); } __funline __m64 _m_psrlqi(__m64 __m, int __count) { return _mm_srli_si64(__m, __count); } __funline __m64 _mm_and_si64(__m64 __m1, __m64 __m2) { return __builtin_ia32_pand(__m1, __m2); } __funline __m64 _m_pand(__m64 __m1, __m64 __m2) { return _mm_and_si64(__m1, __m2); } __funline __m64 _mm_andnot_si64(__m64 __m1, __m64 __m2) { return __builtin_ia32_pandn(__m1, __m2); } __funline __m64 _m_pandn(__m64 __m1, __m64 __m2) { return _mm_andnot_si64(__m1, __m2); } __funline __m64 _mm_or_si64(__m64 __m1, __m64 __m2) { return __builtin_ia32_por(__m1, __m2); } __funline __m64 _m_por(__m64 __m1, __m64 __m2) { return _mm_or_si64(__m1, __m2); } __funline __m64 _mm_xor_si64(__m64 __m1, __m64 __m2) { return __builtin_ia32_pxor(__m1, __m2); } __funline __m64 _m_pxor(__m64 __m1, __m64 __m2) { return _mm_xor_si64(__m1, __m2); } __funline __m64 _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2); } __funline __m64 _m_pcmpeqb(__m64 __m1, __m64 __m2) { return _mm_cmpeq_pi8(__m1, __m2); } __funline __m64 _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2); } __funline __m64 _m_pcmpgtb(__m64 __m1, __m64 __m2) { return _mm_cmpgt_pi8(__m1, __m2); } __funline __m64 _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2); } __funline __m64 _m_pcmpeqw(__m64 __m1, __m64 __m2) { return _mm_cmpeq_pi16(__m1, __m2); } __funline __m64 _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2); } __funline __m64 _m_pcmpgtw(__m64 __m1, __m64 __m2) { return _mm_cmpgt_pi16(__m1, __m2); } __funline __m64 _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2); } __funline __m64 _m_pcmpeqd(__m64 __m1, __m64 __m2) { return _mm_cmpeq_pi32(__m1, __m2); } __funline __m64 _mm_cmpgt_pi32(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2); } __funline __m64 _m_pcmpgtd(__m64 __m1, __m64 __m2) { return _mm_cmpgt_pi32(__m1, __m2); } __funline __m64 _mm_setzero_si64(void) { return (__m64)0LL; } __funline __m64 _mm_set_pi32(int __i1, int __i0) { return (__m64)__builtin_ia32_vec_init_v2si(__i0, __i1); } __funline __m64 _mm_set_pi16(short __w3, short __w2, short __w1, short __w0) { return (__m64)__builtin_ia32_vec_init_v4hi(__w0, __w1, __w2, __w3); } __funline __m64 _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0) { return (__m64)__builtin_ia32_vec_init_v8qi(__b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7); } __funline __m64 _mm_setr_pi32(int __i0, int __i1) { return _mm_set_pi32(__i1, __i0); } __funline __m64 _mm_setr_pi16(short __w0, short __w1, short __w2, short __w3) { return _mm_set_pi16(__w3, __w2, __w1, __w0); } __funline __m64 _mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, 
char __b5, char __b6, char __b7) { return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0); } __funline __m64 _mm_set1_pi32(int __i) { return _mm_set_pi32(__i, __i); } __funline __m64 _mm_set1_pi16(short __w) { return _mm_set_pi16(__w, __w, __w, __w); } __funline __m64 _mm_set1_pi8(char __b) { return _mm_set_pi8(__b, __b, __b, __b, __b, __b, __b, __b); } #ifdef __DISABLE_MMX__ #undef __DISABLE_MMX__ #pragma GCC pop_options #endif /* __DISABLE_MMX__ */ #endif /* __x86_64__ */ #endif /* _MMINTRIN_H_INCLUDED */
15,229
577
jart/cosmopolitan
false
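A sketch of the MMX discipline these wrappers imply: do packed work, read the result back, then call _mm_empty() before any x87 floating-point code. Assumes mmintrin.h with -mmmx (sum2 is illustrative):

#include <mmintrin.h>

int sum2(short a0, short a1, short b0, short b1) {
  __m64 va = _mm_set_pi16(0, 0, a1, a0);
  __m64 vb = _mm_set_pi16(0, 0, b1, b0);
  __m64 vs = _mm_adds_pi16(va, vb);  /* saturating 16-bit adds */
  int lo = _mm_cvtsi64_si32(vs);     /* low 32 bits = two result lanes */
  _mm_empty();                       /* clear MMX state before FP code */
  return (short)(lo & 0xFFFF) + (short)(lo >> 16);
}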
cosmopolitan/third_party/intel/wbnoinvdintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #error "Never use <wbnoinvdintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _WBNOINVDINTRIN_H_INCLUDED #define _WBNOINVDINTRIN_H_INCLUDED #ifndef __WBNOINVD__ #pragma GCC push_options #pragma GCC target("wbnoinvd") #define __DISABLE_WBNOINVD__ #endif /* __WBNOINVD__ */ __funline void _wbnoinvd(void) { __builtin_ia32_wbnoinvd(); } #ifdef __DISABLE_WBNOINVD__ #undef __DISABLE_WBNOINVD__ #pragma GCC pop_options #endif /* __DISABLE_WBNOINVD__ */ #endif /* _WBNOINVDINTRIN_H_INCLUDED */
541
24
jart/cosmopolitan
false
cosmopolitan/third_party/intel/gfniintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #error "Never use <gfniintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _GFNIINTRIN_H_INCLUDED #define _GFNIINTRIN_H_INCLUDED #if !defined(__GFNI__) || !defined(__SSE2__) #pragma GCC push_options #pragma GCC target("gfni,sse2") #define __DISABLE_GFNI__ #endif /* __GFNI__ */ __funline __m128i _mm_gf2p8mul_epi8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vgf2p8mulb_v16qi((__v16qi)__A, (__v16qi)__B); } #ifdef __OPTIMIZE__ __funline __m128i _mm_gf2p8affineinv_epi64_epi8(__m128i __A, __m128i __B, const int __C) { return (__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)__A, (__v16qi)__B, __C); } __funline __m128i _mm_gf2p8affine_epi64_epi8(__m128i __A, __m128i __B, const int __C) { return (__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)__A, (__v16qi)__B, __C); } #else #define _mm_gf2p8affineinv_epi64_epi8(A, B, C) \ ((__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi( \ (__v16qi)(__m128i)(A), (__v16qi)(__m128i)(B), (int)(C))) #define _mm_gf2p8affine_epi64_epi8(A, B, C) \ ((__m128i)__builtin_ia32_vgf2p8affineqb_v16qi( \ (__v16qi)(__m128i)(A), (__v16qi)(__m128i)(B), (int)(C))) #endif #ifdef __DISABLE_GFNI__ #undef __DISABLE_GFNI__ #pragma GCC pop_options #endif /* __DISABLE_GFNI__ */ #if !defined(__GFNI__) || !defined(__AVX__) #pragma GCC push_options #pragma GCC target("gfni,avx") #define __DISABLE_GFNIAVX__ #endif /* __GFNIAVX__ */ __funline __m256i _mm256_gf2p8mul_epi8(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_vgf2p8mulb_v32qi((__v32qi)__A, (__v32qi)__B); } #ifdef __OPTIMIZE__ __funline __m256i _mm256_gf2p8affineinv_epi64_epi8(__m256i __A, __m256i __B, const int __C) { return (__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)__A, (__v32qi)__B, __C); } __funline __m256i _mm256_gf2p8affine_epi64_epi8(__m256i __A, __m256i __B, const int __C) { return (__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)__A, (__v32qi)__B, __C); } #else #define _mm256_gf2p8affineinv_epi64_epi8(A, B, C) \ ((__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi( \ (__v32qi)(__m256i)(A), (__v32qi)(__m256i)(B), (int)(C))) #define _mm256_gf2p8affine_epi64_epi8(A, B, C) \ ((__m256i)__builtin_ia32_vgf2p8affineqb_v32qi( \ (__v32qi)(__m256i)(A), (__v32qi)(__m256i)(B), (int)(C))) #endif #ifdef __DISABLE_GFNIAVX__ #undef __DISABLE_GFNIAVX__ #pragma GCC pop_options #endif /* __GFNIAVX__ */ #if !defined(__GFNI__) || !defined(__AVX512VL__) #pragma GCC push_options #pragma GCC target("gfni,avx512vl") #define __DISABLE_GFNIAVX512VL__ #endif /* __GFNIAVX512VL__ */ __funline __m128i _mm_mask_gf2p8mul_epi8(__m128i __A, __mmask16 __B, __m128i __C, __m128i __D) { return (__m128i)__builtin_ia32_vgf2p8mulb_v16qi_mask( (__v16qi)__C, (__v16qi)__D, (__v16qi)__A, __B); } __funline __m128i _mm_maskz_gf2p8mul_epi8(__mmask16 __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vgf2p8mulb_v16qi_mask( (__v16qi)__B, (__v16qi)__C, (__v16qi)_mm_setzero_si128(), __A); } #ifdef __OPTIMIZE__ __funline __m128i _mm_mask_gf2p8affineinv_epi64_epi8(__m128i __A, __mmask16 __B, __m128i __C, __m128i __D, const int __E) { return (__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi_mask( (__v16qi)__C, (__v16qi)__D, __E, (__v16qi)__A, __B); } __funline __m128i _mm_maskz_gf2p8affineinv_epi64_epi8(__mmask16 __A, __m128i __B, __m128i __C, const int __D) { return (__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi_mask( (__v16qi)__B, (__v16qi)__C, __D, (__v16qi)_mm_setzero_si128(), __A); } __funline __m128i _mm_mask_gf2p8affine_epi64_epi8(__m128i __A, __mmask16 __B, __m128i 
__C, __m128i __D, const int __E) { return (__m128i)__builtin_ia32_vgf2p8affineqb_v16qi_mask( (__v16qi)__C, (__v16qi)__D, __E, (__v16qi)__A, __B); } __funline __m128i _mm_maskz_gf2p8affine_epi64_epi8(__mmask16 __A, __m128i __B, __m128i __C, const int __D) { return (__m128i)__builtin_ia32_vgf2p8affineqb_v16qi_mask( (__v16qi)__B, (__v16qi)__C, __D, (__v16qi)_mm_setzero_si128(), __A); } #else #define _mm_mask_gf2p8affineinv_epi64_epi8(A, B, C, D, E) \ ((__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi_mask( \ (__v16qi)(__m128i)(C), (__v16qi)(__m128i)(D), (int)(E), \ (__v16qi)(__m128i)(A), (__mmask16)(B))) #define _mm_maskz_gf2p8affineinv_epi64_epi8(A, B, C, D) \ ((__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi_mask( \ (__v16qi)(__m128i)(B), (__v16qi)(__m128i)(C), (int)(D), \ (__v16qi)(__m128i)_mm_setzero_si128(), (__mmask16)(A))) #define _mm_mask_gf2p8affine_epi64_epi8(A, B, C, D, E) \ ((__m128i)__builtin_ia32_vgf2p8affineqb_v16qi_mask( \ (__v16qi)(__m128i)(C), (__v16qi)(__m128i)(D), (int)(E), \ (__v16qi)(__m128i)(A), (__mmask16)(B))) #define _mm_maskz_gf2p8affine_epi64_epi8(A, B, C, D) \ ((__m128i)__builtin_ia32_vgf2p8affineqb_v16qi_mask( \ (__v16qi)(__m128i)(B), (__v16qi)(__m128i)(C), (int)(D), \ (__v16qi)(__m128i)_mm_setzero_si128(), (__mmask16)(A))) #endif #ifdef __DISABLE_GFNIAVX512VL__ #undef __DISABLE_GFNIAVX512VL__ #pragma GCC pop_options #endif /* __GFNIAVX512VL__ */ #if !defined(__GFNI__) || !defined(__AVX512VL__) || !defined(__AVX512BW__) #pragma GCC push_options #pragma GCC target("gfni,avx512vl,avx512bw") #define __DISABLE_GFNIAVX512VLBW__ #endif /* __GFNIAVX512VLBW__ */ __funline __m256i _mm256_mask_gf2p8mul_epi8(__m256i __A, __mmask32 __B, __m256i __C, __m256i __D) { return (__m256i)__builtin_ia32_vgf2p8mulb_v32qi_mask( (__v32qi)__C, (__v32qi)__D, (__v32qi)__A, __B); } __funline __m256i _mm256_maskz_gf2p8mul_epi8(__mmask32 __A, __m256i __B, __m256i __C) { return (__m256i)__builtin_ia32_vgf2p8mulb_v32qi_mask( (__v32qi)__B, (__v32qi)__C, (__v32qi)_mm256_setzero_si256(), __A); } #ifdef __OPTIMIZE__ __funline __m256i _mm256_mask_gf2p8affineinv_epi64_epi8(__m256i __A, __mmask32 __B, __m256i __C, __m256i __D, const int __E) { return (__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi_mask( (__v32qi)__C, (__v32qi)__D, __E, (__v32qi)__A, __B); } __funline __m256i _mm256_maskz_gf2p8affineinv_epi64_epi8(__mmask32 __A, __m256i __B, __m256i __C, const int __D) { return (__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi_mask( (__v32qi)__B, (__v32qi)__C, __D, (__v32qi)_mm256_setzero_si256(), __A); } __funline __m256i _mm256_mask_gf2p8affine_epi64_epi8(__m256i __A, __mmask32 __B, __m256i __C, __m256i __D, const int __E) { return (__m256i)__builtin_ia32_vgf2p8affineqb_v32qi_mask( (__v32qi)__C, (__v32qi)__D, __E, (__v32qi)__A, __B); } __funline __m256i _mm256_maskz_gf2p8affine_epi64_epi8(__mmask32 __A, __m256i __B, __m256i __C, const int __D) { return (__m256i)__builtin_ia32_vgf2p8affineqb_v32qi_mask( (__v32qi)__B, (__v32qi)__C, __D, (__v32qi)_mm256_setzero_si256(), __A); } #else #define _mm256_mask_gf2p8affineinv_epi64_epi8(A, B, C, D, E) \ ((__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi_mask( \ (__v32qi)(__m256i)(C), (__v32qi)(__m256i)(D), (int)(E), \ (__v32qi)(__m256i)(A), (__mmask32)(B))) #define _mm256_maskz_gf2p8affineinv_epi64_epi8(A, B, C, D) \ ((__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi_mask( \ (__v32qi)(__m256i)(B), (__v32qi)(__m256i)(C), (int)(D), \ (__v32qi)(__m256i)_mm256_setzero_si256(), (__mmask32)(A))) #define _mm256_mask_gf2p8affine_epi64_epi8(A, B, C, D, E) \ 
((__m256i)__builtin_ia32_vgf2p8affineqb_v32qi_mask( \ (__v32qi)(__m256i)(C), (__v32qi)(__m256i)(D), (int)(E), \ (__v32qi)(__m256i)(A), (__mmask32)(B))) #define _mm256_maskz_gf2p8affine_epi64_epi8(A, B, C, D) \ ((__m256i)__builtin_ia32_vgf2p8affineqb_v32qi_mask( \ (__v32qi)(__m256i)(B), (__v32qi)(__m256i)(C), (int)(D), \ (__v32qi)(__m256i)_mm256_setzero_si256(), (__mmask32)(A))) #endif #ifdef __DISABLE_GFNIAVX512VLBW__ #undef __DISABLE_GFNIAVX512VLBW__ #pragma GCC pop_options #endif /* __GFNIAVX512VLBW__ */ #if !defined(__GFNI__) || !defined(__AVX512F__) || !defined(__AVX512BW__) #pragma GCC push_options #pragma GCC target("gfni,avx512f,avx512bw") #define __DISABLE_GFNIAVX512FBW__ #endif /* __GFNIAVX512FBW__ */ __funline __m512i _mm512_mask_gf2p8mul_epi8(__m512i __A, __mmask64 __B, __m512i __C, __m512i __D) { return (__m512i)__builtin_ia32_vgf2p8mulb_v64qi_mask( (__v64qi)__C, (__v64qi)__D, (__v64qi)__A, __B); } __funline __m512i _mm512_maskz_gf2p8mul_epi8(__mmask64 __A, __m512i __B, __m512i __C) { return (__m512i)__builtin_ia32_vgf2p8mulb_v64qi_mask( (__v64qi)__B, (__v64qi)__C, (__v64qi)_mm512_setzero_si512(), __A); } __funline __m512i _mm512_gf2p8mul_epi8(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_vgf2p8mulb_v64qi((__v64qi)__A, (__v64qi)__B); } #ifdef __OPTIMIZE__ __funline __m512i _mm512_mask_gf2p8affineinv_epi64_epi8(__m512i __A, __mmask64 __B, __m512i __C, __m512i __D, const int __E) { return (__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi_mask( (__v64qi)__C, (__v64qi)__D, __E, (__v64qi)__A, __B); } __funline __m512i _mm512_maskz_gf2p8affineinv_epi64_epi8(__mmask64 __A, __m512i __B, __m512i __C, const int __D) { return (__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi_mask( (__v64qi)__B, (__v64qi)__C, __D, (__v64qi)_mm512_setzero_si512(), __A); } __funline __m512i _mm512_gf2p8affineinv_epi64_epi8(__m512i __A, __m512i __B, const int __C) { return (__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)__A, (__v64qi)__B, __C); } __funline __m512i _mm512_mask_gf2p8affine_epi64_epi8(__m512i __A, __mmask64 __B, __m512i __C, __m512i __D, const int __E) { return (__m512i)__builtin_ia32_vgf2p8affineqb_v64qi_mask( (__v64qi)__C, (__v64qi)__D, __E, (__v64qi)__A, __B); } __funline __m512i _mm512_maskz_gf2p8affine_epi64_epi8(__mmask64 __A, __m512i __B, __m512i __C, const int __D) { return (__m512i)__builtin_ia32_vgf2p8affineqb_v64qi_mask( (__v64qi)__B, (__v64qi)__C, __D, (__v64qi)_mm512_setzero_si512(), __A); } __funline __m512i _mm512_gf2p8affine_epi64_epi8(__m512i __A, __m512i __B, const int __C) { return (__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)__A, (__v64qi)__B, __C); } #else #define _mm512_mask_gf2p8affineinv_epi64_epi8(A, B, C, D, E) \ ((__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi_mask( \ (__v64qi)(__m512i)(C), (__v64qi)(__m512i)(D), (int)(E), \ (__v64qi)(__m512i)(A), (__mmask64)(B))) #define _mm512_maskz_gf2p8affineinv_epi64_epi8(A, B, C, D) \ ((__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi_mask( \ (__v64qi)(__m512i)(B), (__v64qi)(__m512i)(C), (int)(D), \ (__v64qi)(__m512i)_mm512_setzero_si512(), (__mmask64)(A))) #define _mm512_gf2p8affineinv_epi64_epi8(A, B, C) \ ((__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi( \ (__v64qi)(__m512i)(A), (__v64qi)(__m512i)(B), (int)(C))) #define _mm512_mask_gf2p8affine_epi64_epi8(A, B, C, D, E) \ ((__m512i)__builtin_ia32_vgf2p8affineqb_v64qi_mask( \ (__v64qi)(__m512i)(C), (__v64qi)(__m512i)(D), (int)(E), \ (__v64qi)(__m512i)(A), (__mmask64)(B))) #define _mm512_maskz_gf2p8affine_epi64_epi8(A, B, C, D) \ 
((__m512i)__builtin_ia32_vgf2p8affineqb_v64qi_mask( \ (__v64qi)(__m512i)(B), (__v64qi)(__m512i)(C), (int)(D), \ (__v64qi)(__m512i)_mm512_setzero_si512(), (__mmask64)(A))) #define _mm512_gf2p8affine_epi64_epi8(A, B, C) \ ((__m512i)__builtin_ia32_vgf2p8affineqb_v64qi( \ (__v64qi)(__m512i)(A), (__v64qi)(__m512i)(B), (int)(C))) #endif #ifdef __DISABLE_GFNIAVX512FBW__ #undef __DISABLE_GFNIAVX512FBW__ #pragma GCC pop_options #endif /* __GFNIAVX512FBW__ */ #endif /* _GFNIINTRIN_H_INCLUDED */
13,838
312
jart/cosmopolitan
false
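A sketch of the byte-wise GF(2^8) multiply above, which uses the AES reduction polynomial 0x11B; assumes -mgfni -msse2 (gf_mul_bytes is illustrative):

#include <immintrin.h>

__m128i gf_mul_bytes(__m128i a, __m128i b) {
  /* sixteen independent GF(2^8) products, one per byte lane */
  return _mm_gf2p8mul_epi8(a, b);
}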
cosmopolitan/third_party/intel/fma4intrin.internal.h
#ifndef _X86INTRIN_H_INCLUDED #error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead." #endif #ifndef _FMA4INTRIN_H_INCLUDED #define _FMA4INTRIN_H_INCLUDED #include "third_party/intel/ammintrin.internal.h" #ifndef __FMA4__ #pragma GCC push_options #pragma GCC target("fma4") #define __DISABLE_FMA4__ #endif /* __FMA4__ */ __funline __m128 _mm_macc_ps(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } __funline __m128d _mm_macc_pd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } __funline __m128 _mm_macc_ss(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } __funline __m128d _mm_macc_sd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C); } __funline __m128 _mm_msub_ps(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); } __funline __m128d _mm_msub_pd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); } __funline __m128 _mm_msub_ss(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); } __funline __m128d _mm_msub_sd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, -(__v2df)__C); } __funline __m128 _mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } __funline __m128d _mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C); } __funline __m128 _mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddss(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } __funline __m128d _mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, (__v2df)__C); } __funline __m128 _mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); } __funline __m128d _mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C); } __funline __m128 _mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddss(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); } __funline __m128d _mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C); } __funline __m128 _mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } __funline __m128d _mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } __funline __m128 _mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); } __funline __m128d _mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); } /* 256b Floating point multiply/add type instructions. 
*/ __funline __m256 _mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } __funline __m256d _mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } __funline __m256 _mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); } __funline __m256d _mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C); } __funline __m256 _mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } __funline __m256d _mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C); } __funline __m256 _mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); } __funline __m256d _mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C); } __funline __m256 _mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } __funline __m256d _mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } __funline __m256 _mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); } __funline __m256d _mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C); } #ifdef __DISABLE_FMA4__ #undef __DISABLE_FMA4__ #pragma GCC pop_options #endif /* __DISABLE_FMA4__ */ #endif
6,893
185
jart/cosmopolitan
false
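The FMA4 naming scheme above: macc is a*b+c, msub is a*b-c, and the nm forms negate the product. A sketch assuming x86intrin.h with -mfma4 (AMD Bulldozer-era hardware; lerp4 is illustrative):

#include <x86intrin.h>

__m128 lerp4(__m128 a, __m128 b, __m128 t) {
  /* a + t*(b - a), fused into one multiply-add */
  return _mm_macc_ps(t, _mm_sub_ps(b, a), a);
}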
cosmopolitan/third_party/intel/cldemoteintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED #error "Never use <cldemoteintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _CLDEMOTE_H_INCLUDED #define _CLDEMOTE_H_INCLUDED #ifndef __CLDEMOTE__ #pragma GCC push_options #pragma GCC target("cldemote") #define __DISABLE_CLDEMOTE__ #endif /* __CLDEMOTE__ */ __funline void _cldemote(void *__A) { __builtin_ia32_cldemote(__A); } #ifdef __DISABLE_CLDEMOTE__ #undef __DISABLE_CLDEMOTE__ #pragma GCC pop_options #endif /* __DISABLE_CLDEMOTE__ */ #endif /* _CLDEMOTE_H_INCLUDED */
534
22
jart/cosmopolitan
false
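_cldemote is only a hint, encoded in NOP space, so a sketch like this is safe to run even where the CPU ignores it; assumes -mcldemote (publish is illustrative):

#include <immintrin.h>

void publish(int *slot, int v) {
  *slot = v;
  _cldemote(slot); /* push the written line toward shared cache for consumers */
}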
cosmopolitan/third_party/intel/avx512pfintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #error "Never use <avx512pfintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _AVX512PFINTRIN_H_INCLUDED #define _AVX512PFINTRIN_H_INCLUDED #ifndef __AVX512PF__ #pragma GCC push_options #pragma GCC target("avx512pf") #define __DISABLE_AVX512PF__ #endif /* __AVX512PF__ */ typedef long long __v8di __attribute__((__vector_size__(64))); typedef int __v16si __attribute__((__vector_size__(64))); typedef long long __m512i __attribute__((__vector_size__(64), __may_alias__)); typedef unsigned char __mmask8; typedef unsigned short __mmask16; #ifdef __OPTIMIZE__ __funline void _mm512_prefetch_i32gather_pd(__m256i __index, void const *__addr, int __scale, int __hint) { __builtin_ia32_gatherpfdpd((__mmask8)0xFF, (__v8si)__index, __addr, __scale, __hint); } __funline void _mm512_prefetch_i32gather_ps(__m512i __index, void const *__addr, int __scale, int __hint) { __builtin_ia32_gatherpfdps((__mmask16)0xFFFF, (__v16si)__index, __addr, __scale, __hint); } __funline void _mm512_mask_prefetch_i32gather_pd(__m256i __index, __mmask8 __mask, void const *__addr, int __scale, int __hint) { __builtin_ia32_gatherpfdpd(__mask, (__v8si)__index, __addr, __scale, __hint); } __funline void _mm512_mask_prefetch_i32gather_ps(__m512i __index, __mmask16 __mask, void const *__addr, int __scale, int __hint) { __builtin_ia32_gatherpfdps(__mask, (__v16si)__index, __addr, __scale, __hint); } __funline void _mm512_prefetch_i64gather_pd(__m512i __index, void const *__addr, int __scale, int __hint) { __builtin_ia32_gatherpfqpd((__mmask8)0xFF, (__v8di)__index, __addr, __scale, __hint); } __funline void _mm512_prefetch_i64gather_ps(__m512i __index, void const *__addr, int __scale, int __hint) { __builtin_ia32_gatherpfqps((__mmask8)0xFF, (__v8di)__index, __addr, __scale, __hint); } __funline void _mm512_mask_prefetch_i64gather_pd(__m512i __index, __mmask8 __mask, void const *__addr, int __scale, int __hint) { __builtin_ia32_gatherpfqpd(__mask, (__v8di)__index, __addr, __scale, __hint); } __funline void _mm512_mask_prefetch_i64gather_ps(__m512i __index, __mmask8 __mask, void const *__addr, int __scale, int __hint) { __builtin_ia32_gatherpfqps(__mask, (__v8di)__index, __addr, __scale, __hint); } __funline void _mm512_prefetch_i32scatter_pd(void *__addr, __m256i __index, int __scale, int __hint) { __builtin_ia32_scatterpfdpd((__mmask8)0xFF, (__v8si)__index, __addr, __scale, __hint); } __funline void _mm512_prefetch_i32scatter_ps(void *__addr, __m512i __index, int __scale, int __hint) { __builtin_ia32_scatterpfdps((__mmask16)0xFFFF, (__v16si)__index, __addr, __scale, __hint); } __funline void _mm512_mask_prefetch_i32scatter_pd(void *__addr, __mmask8 __mask, __m256i __index, int __scale, int __hint) { __builtin_ia32_scatterpfdpd(__mask, (__v8si)__index, __addr, __scale, __hint); } __funline void _mm512_mask_prefetch_i32scatter_ps(void *__addr, __mmask16 __mask, __m512i __index, int __scale, int __hint) { __builtin_ia32_scatterpfdps(__mask, (__v16si)__index, __addr, __scale, __hint); } __funline void _mm512_prefetch_i64scatter_pd(void *__addr, __m512i __index, int __scale, int __hint) { __builtin_ia32_scatterpfqpd((__mmask8)0xFF, (__v8di)__index, __addr, __scale, __hint); } __funline void _mm512_prefetch_i64scatter_ps(void *__addr, __m512i __index, int __scale, int __hint) { __builtin_ia32_scatterpfqps((__mmask8)0xFF, (__v8di)__index, __addr, __scale, __hint); } __funline void _mm512_mask_prefetch_i64scatter_pd(void *__addr, __mmask8 __mask, __m512i __index, int __scale, int __hint) { 
__builtin_ia32_scatterpfqpd(__mask, (__v8di)__index, __addr, __scale, __hint); } __funline void _mm512_mask_prefetch_i64scatter_ps(void *__addr, __mmask8 __mask, __m512i __index, int __scale, int __hint) { __builtin_ia32_scatterpfqps(__mask, (__v8di)__index, __addr, __scale, __hint); } #else #define _mm512_prefetch_i32gather_pd(INDEX, ADDR, SCALE, HINT) \ __builtin_ia32_gatherpfdpd((__mmask8)0xFF, (__v8si)(__m256i)INDEX, \ (void const *)ADDR, (int)SCALE, (int)HINT) #define _mm512_prefetch_i32gather_ps(INDEX, ADDR, SCALE, HINT) \ __builtin_ia32_gatherpfdps((__mmask16)0xFFFF, (__v16si)(__m512i)INDEX, \ (void const *)ADDR, (int)SCALE, (int)HINT) #define _mm512_mask_prefetch_i32gather_pd(INDEX, MASK, ADDR, SCALE, HINT) \ __builtin_ia32_gatherpfdpd((__mmask8)MASK, (__v8si)(__m256i)INDEX, \ (void const *)ADDR, (int)SCALE, (int)HINT) #define _mm512_mask_prefetch_i32gather_ps(INDEX, MASK, ADDR, SCALE, HINT) \ __builtin_ia32_gatherpfdps((__mmask16)MASK, (__v16si)(__m512i)INDEX, \ (void const *)ADDR, (int)SCALE, (int)HINT) #define _mm512_prefetch_i64gather_pd(INDEX, ADDR, SCALE, HINT) \ __builtin_ia32_gatherpfqpd((__mmask8)0xFF, (__v8di)(__m512i)INDEX, \ (void *)ADDR, (int)SCALE, (int)HINT) #define _mm512_prefetch_i64gather_ps(INDEX, ADDR, SCALE, HINT) \ __builtin_ia32_gatherpfqps((__mmask8)0xFF, (__v8di)(__m512i)INDEX, \ (void *)ADDR, (int)SCALE, (int)HINT) #define _mm512_mask_prefetch_i64gather_pd(INDEX, MASK, ADDR, SCALE, HINT) \ __builtin_ia32_gatherpfqpd((__mmask8)MASK, (__v8di)(__m512i)INDEX, \ (void *)ADDR, (int)SCALE, (int)HINT) #define _mm512_mask_prefetch_i64gather_ps(INDEX, MASK, ADDR, SCALE, HINT) \ __builtin_ia32_gatherpfqps((__mmask8)MASK, (__v8di)(__m512i)INDEX, \ (void *)ADDR, (int)SCALE, (int)HINT) #define _mm512_prefetch_i32scatter_pd(ADDR, INDEX, SCALE, HINT) \ __builtin_ia32_scatterpfdpd((__mmask8)0xFF, (__v8si)(__m256i)INDEX, \ (void *)ADDR, (int)SCALE, (int)HINT) #define _mm512_prefetch_i32scatter_ps(ADDR, INDEX, SCALE, HINT) \ __builtin_ia32_scatterpfdps((__mmask16)0xFFFF, (__v16si)(__m512i)INDEX, \ (void *)ADDR, (int)SCALE, (int)HINT) #define _mm512_mask_prefetch_i32scatter_pd(ADDR, MASK, INDEX, SCALE, HINT) \ __builtin_ia32_scatterpfdpd((__mmask8)MASK, (__v8si)(__m256i)INDEX, \ (void *)ADDR, (int)SCALE, (int)HINT) #define _mm512_mask_prefetch_i32scatter_ps(ADDR, MASK, INDEX, SCALE, HINT) \ __builtin_ia32_scatterpfdps((__mmask16)MASK, (__v16si)(__m512i)INDEX, \ (void *)ADDR, (int)SCALE, (int)HINT) #define _mm512_prefetch_i64scatter_pd(ADDR, INDEX, SCALE, HINT) \ __builtin_ia32_scatterpfqpd((__mmask8)0xFF, (__v8di)(__m512i)INDEX, \ (void *)ADDR, (int)SCALE, (int)HINT) #define _mm512_prefetch_i64scatter_ps(ADDR, INDEX, SCALE, HINT) \ __builtin_ia32_scatterpfqps((__mmask8)0xFF, (__v8di)(__m512i)INDEX, \ (void *)ADDR, (int)SCALE, (int)HINT) #define _mm512_mask_prefetch_i64scatter_pd(ADDR, MASK, INDEX, SCALE, HINT) \ __builtin_ia32_scatterpfqpd((__mmask8)MASK, (__v8di)(__m512i)INDEX, \ (void *)ADDR, (int)SCALE, (int)HINT) #define _mm512_mask_prefetch_i64scatter_ps(ADDR, MASK, INDEX, SCALE, HINT) \ __builtin_ia32_scatterpfqps((__mmask8)MASK, (__v8di)(__m512i)INDEX, \ (void *)ADDR, (int)SCALE, (int)HINT) #endif #ifdef __DISABLE_AVX512PF__ #undef __DISABLE_AVX512PF__ #pragma GCC pop_options #endif /* __DISABLE_AVX512PF__ */ #endif /* _AVX512PFINTRIN_H_INCLUDED */
8,953
191
jart/cosmopolitan
false
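A sketch of the gather-prefetch path above, warming table[idx[i]] ahead of a real gather; assumes -mavx512pf (Knights Landing-class parts) and _MM_HINT_T0 from xmmintrin.h, with warm as an illustrative name:

#include <immintrin.h>

void warm(double const *table, __m256i idx) {
  /* scale 8 because the elements are doubles; T0 selects vgatherpf0dpd */
  _mm512_prefetch_i32gather_pd(idx, table, 8, _MM_HINT_T0);
}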
cosmopolitan/third_party/intel/pconfigintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #error "Never use <pconfigintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _PCONFIGINTRIN_H_INCLUDED #define _PCONFIGINTRIN_H_INCLUDED #ifndef __PCONFIG__ #pragma GCC push_options #pragma GCC target("pconfig") #define __DISABLE_PCONFIG__ #endif /* __PCONFIG__ */ #define __pconfig_b(leaf, b, retval) \ __asm__ __volatile__("pconfig\n\t" \ : "=a"(retval) \ : "a"(leaf), "b"(b) \ : "cc") #define __pconfig_generic(leaf, b, c, d, retval) \ __asm__ __volatile__("pconfig\n\t" \ : "=a"(retval), "=b"(b), "=c"(c), "=d"(d) \ : "a"(leaf), "b"(b), "c"(c), "d"(d) \ : "cc") __funline unsigned int _pconfig_u32(const unsigned int __L, size_t __D[]) { enum __pconfig_type { __PCONFIG_KEY_PROGRAM = 0x01, }; unsigned int __R = 0; if (!__builtin_constant_p(__L)) __pconfig_generic(__L, __D[0], __D[1], __D[2], __R); else switch (__L) { case __PCONFIG_KEY_PROGRAM: __pconfig_b(__L, __D[0], __R); break; default: __pconfig_generic(__L, __D[0], __D[1], __D[2], __R); } return __R; } #ifdef __DISABLE_PCONFIG__ #undef __DISABLE_PCONFIG__ #pragma GCC pop_options #endif /* __DISABLE_PCONFIG__ */ #endif /* _PCONFIGINTRIN_H_INCLUDED */
1,490
53
jart/cosmopolitan
false
cosmopolitan/third_party/intel/wmmintrin.internal.h
#ifndef _WMMINTRIN_H_INCLUDED #define _WMMINTRIN_H_INCLUDED #ifdef __x86_64__ #include "third_party/intel/emmintrin.internal.h" #if !defined(__AES__) || !defined(__SSE2__) #pragma GCC push_options #pragma GCC target("aes,sse2") #define __DISABLE_AES__ #endif /* __AES__ */ __funline __m128i _mm_aesdec_si128(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_aesdec128((__v2di)__X, (__v2di)__Y); } __funline __m128i _mm_aesdeclast_si128(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_aesdeclast128((__v2di)__X, (__v2di)__Y); } __funline __m128i _mm_aesenc_si128(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_aesenc128((__v2di)__X, (__v2di)__Y); } __funline __m128i _mm_aesenclast_si128(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_aesenclast128((__v2di)__X, (__v2di)__Y); } __funline __m128i _mm_aesimc_si128(__m128i __X) { return (__m128i)__builtin_ia32_aesimc128((__v2di)__X); } #ifdef __OPTIMIZE__ __funline __m128i _mm_aeskeygenassist_si128(__m128i __X, const int __C) { return (__m128i)__builtin_ia32_aeskeygenassist128((__v2di)__X, __C); } #else #define _mm_aeskeygenassist_si128(X, C) \ ((__m128i)__builtin_ia32_aeskeygenassist128((__v2di)(__m128i)(X), (int)(C))) #endif #ifdef __DISABLE_AES__ #undef __DISABLE_AES__ #pragma GCC pop_options #endif /* __DISABLE_AES__ */ #if !defined(__PCLMUL__) || !defined(__SSE2__) #pragma GCC push_options #pragma GCC target("pclmul,sse2") #define __DISABLE_PCLMUL__ #endif /* __PCLMUL__ */ #ifdef __OPTIMIZE__ __funline __m128i _mm_clmulepi64_si128(__m128i __X, __m128i __Y, const int __I) { return (__m128i)__builtin_ia32_pclmulqdq128((__v2di)__X, (__v2di)__Y, __I); } #else #define _mm_clmulepi64_si128(X, Y, I) \ ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(X), \ (__v2di)(__m128i)(Y), (int)(I))) #endif #ifdef __DISABLE_PCLMUL__ #undef __DISABLE_PCLMUL__ #pragma GCC pop_options #endif /* __DISABLE_PCLMUL__ */ #endif /* __x86_64__ */ #endif /* _WMMINTRIN_H_INCLUDED */
2,070
69
jart/cosmopolitan
false
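One AES round step as exposed above; a full AES-128 encrypt chains nine of these plus a final round with _mm_aesenclast_si128. A sketch assuming -maes -msse2 (aes_middle_round is illustrative):

#include <wmmintrin.h>

__m128i aes_middle_round(__m128i state, __m128i roundkey) {
  /* ShiftRows, SubBytes, MixColumns, then XOR with the round key */
  return _mm_aesenc_si128(state, roundkey);
}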
cosmopolitan/third_party/intel/lzcntintrin.internal.h
#if !defined _X86INTRIN_H_INCLUDED && !defined _IMMINTRIN_H_INCLUDED #error "Never use <lzcntintrin.h> directly; include <x86intrin.h> instead." #endif #ifndef _LZCNTINTRIN_H_INCLUDED #define _LZCNTINTRIN_H_INCLUDED #ifndef __LZCNT__ #pragma GCC push_options #pragma GCC target("lzcnt") #define __DISABLE_LZCNT__ #endif /* __LZCNT__ */ __funline unsigned short __lzcnt16(unsigned short __X) { return __builtin_ia32_lzcnt_u16(__X); } __funline unsigned int __lzcnt32(unsigned int __X) { return __builtin_ia32_lzcnt_u32(__X); } __funline unsigned int _lzcnt_u32(unsigned int __X) { return __builtin_ia32_lzcnt_u32(__X); } #ifdef __x86_64__ __funline unsigned long long __lzcnt64(unsigned long long __X) { return __builtin_ia32_lzcnt_u64(__X); } __funline unsigned long long _lzcnt_u64(unsigned long long __X) { return __builtin_ia32_lzcnt_u64(__X); } #endif #ifdef __DISABLE_LZCNT__ #undef __DISABLE_LZCNT__ #pragma GCC pop_options #endif /* __DISABLE_LZCNT__ */ #endif /* _LZCNTINTRIN_H_INCLUDED */
1,017
42
jart/cosmopolitan
false
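Unlike BSR, LZCNT is defined for a zero input (it returns the operand width), which makes bit-length idioms branch-free; a sketch assuming -mlzcnt (bit_length is illustrative):

#include <immintrin.h>

unsigned bit_length(unsigned x) {
  /* number of significant bits; yields 0 for x==0 since _lzcnt_u32(0) == 32 */
  return 32 - _lzcnt_u32(x);
}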
cosmopolitan/third_party/intel/xtestintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #error "Never use <xtestintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _XTESTINTRIN_H_INCLUDED #define _XTESTINTRIN_H_INCLUDED #ifndef __RTM__ #pragma GCC push_options #pragma GCC target("rtm") #define __DISABLE_RTM__ #endif /* __RTM__ */ __funline int _xtest(void) { return __builtin_ia32_xtest(); } #ifdef __DISABLE_RTM__ #undef __DISABLE_RTM__ #pragma GCC pop_options #endif /* __DISABLE_RTM__ */ #endif /* _XTESTINTRIN_H_INCLUDED */
494
24
jart/cosmopolitan
false
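_xtest() pairs with the RTM entry points from rtmintrin.h to answer "am I inside a transaction right now"; a sketch assuming -mrtm, with increment as an illustrative helper and a plain atomic as the abort fallback:

#include <immintrin.h>

void increment(long *p) {
  if (_xbegin() == _XBEGIN_STARTED) {
    *p += 1;
    if (_xtest()) /* nonzero: still executing transactionally */
      _xend();
  } else {
    __atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST); /* abort/unsupported path */
  }
}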
cosmopolitan/third_party/intel/ammintrin.internal.h
#ifndef _AMMINTRIN_H_INCLUDED #define _AMMINTRIN_H_INCLUDED #ifdef __x86_64__ #include "third_party/intel/pmmintrin.internal.h" #ifndef __SSE4A__ #pragma GCC push_options #pragma GCC target("sse4a") #define __DISABLE_SSE4A__ #endif /* __SSE4A__ */ __funline void _mm_stream_sd(double* __P, __m128d __Y) { __builtin_ia32_movntsd(__P, (__v2df)__Y); } __funline void _mm_stream_ss(float* __P, __m128 __Y) { __builtin_ia32_movntss(__P, (__v4sf)__Y); } __funline __m128i _mm_extract_si64(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_extrq((__v2di)__X, (__v16qi)__Y); } #ifdef __OPTIMIZE__ __funline __m128i _mm_extracti_si64(__m128i __X, unsigned const int __I, unsigned const int __L) { return (__m128i)__builtin_ia32_extrqi((__v2di)__X, __I, __L); } #else #define _mm_extracti_si64(X, I, L) \ ((__m128i)__builtin_ia32_extrqi((__v2di)(__m128i)(X), (unsigned int)(I), \ (unsigned int)(L))) #endif __funline __m128i _mm_insert_si64(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_insertq((__v2di)__X, (__v2di)__Y); } #ifdef __OPTIMIZE__ __funline __m128i _mm_inserti_si64(__m128i __X, __m128i __Y, unsigned const int __I, unsigned const int __L) { return (__m128i)__builtin_ia32_insertqi((__v2di)__X, (__v2di)__Y, __I, __L); } #else #define _mm_inserti_si64(X, Y, I, L) \ ((__m128i)__builtin_ia32_insertqi((__v2di)(__m128i)(X), \ (__v2di)(__m128i)(Y), (unsigned int)(I), \ (unsigned int)(L))) #endif #ifdef __DISABLE_SSE4A__ #undef __DISABLE_SSE4A__ #pragma GCC pop_options #endif /* __DISABLE_SSE4A__ */ #endif /* __x86_64__ */ #endif /* _AMMINTRIN_H_INCLUDED */
1,913
59
jart/cosmopolitan
false
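A sketch of the SSE4a bit-field extract above, where the second immediate is the field length in bits and the third its starting bit offset within the low quadword; assumes -msse4a (AMD) and get_field is illustrative:

#include <ammintrin.h>

__m128i get_field(__m128i x) {
  /* pull 13 bits starting at bit 7 of the low 64 bits of x */
  return _mm_extracti_si64(x, 13, 7);
}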
cosmopolitan/third_party/intel/sgxintrin.internal.h
#ifndef _SGXINTRIN_H_INCLUDED #define _SGXINTRIN_H_INCLUDED #ifdef __x86_64__ #ifndef __SGX__ #pragma GCC push_options #pragma GCC target("sgx") #define __DISABLE_SGX__ #endif /* __SGX__ */ #define __encls_bc(leaf, b, c, retval) \ __asm__ __volatile__("encls\n\t" \ : "=a"(retval) \ : "a"(leaf), "b"(b), "c"(c) \ : "cc") #define __encls_bcd(leaf, b, c, d, retval) \ __asm__ __volatile__("encls\n\t" \ : "=a"(retval) \ : "a"(leaf), "b"(b), "c"(c), "d"(d) \ : "cc") #define __encls_c(leaf, c, retval) \ __asm__ __volatile__("encls\n\t" : "=a"(retval) : "a"(leaf), "c"(c) : "cc") #define __encls_edbgrd(leaf, b, c, retval) \ __asm__ __volatile__("encls\n\t" : "=a"(retval), "=b"(b) : "a"(leaf), "c"(c)) #define __encls_generic(leaf, b, c, d, retval) \ __asm__ __volatile__("encls\n\t" \ : "=a"(retval), "=b"(b), "=c"(c), "=d"(d) \ : "a"(leaf), "b"(b), "c"(c), "d"(d) \ : "cc") #define __enclu_bc(leaf, b, c, retval) \ __asm__ __volatile__("enclu\n\t" \ : "=a"(retval) \ : "a"(leaf), "b"(b), "c"(c) \ : "cc") #define __enclu_bcd(leaf, b, c, d, retval) \ __asm__ __volatile__("enclu\n\t" \ : "=a"(retval) \ : "a"(leaf), "b"(b), "c"(c), "d"(d) \ : "cc") #define __enclu_eenter(leaf, b, c, retval) \ __asm__ __volatile__("enclu\n\t" \ : "=a"(retval), "=c"(c) \ : "a"(leaf), "b"(b), "c"(c) \ : "cc") #define __enclu_eexit(leaf, b, c, retval) \ __asm__ __volatile__("enclu\n\t" \ : "=a"(retval), "=c"(c) \ : "a"(leaf), "b"(b) \ : "cc") #define __enclu_generic(leaf, b, c, d, retval) \ __asm__ __volatile__("enclu\n\t" \ : "=a"(retval), "=b"(b), "=c"(c), "=d"(d) \ : "a"(leaf), "b"(b), "c"(c), "d"(d) \ : "cc") #define __enclv_bc(leaf, b, c, retval) \ __asm__ __volatile__("enclv\n\t" \ : "=a"(retval) \ : "a"(leaf), "b"(b), "c"(c) \ : "cc") #define __enclv_cd(leaf, c, d, retval) \ __asm__ __volatile__("enclv\n\t" \ : "=a"(retval) \ : "a"(leaf), "c"(c), "d"(d) \ : "cc") #define __enclv_generic(leaf, b, c, d, retval) \ __asm__ __volatile__("enclv\n\t" \ : "=a"(retval), "=b"(b), "=c"(c), "=d"(d) \ : "a"(leaf), "b"(b), "c"(c), "d"(d) \ : "cc") __funline unsigned int _encls_u32(const unsigned int __L, size_t __D[]) { enum __encls_type { __SGX_ECREATE = 0x00, __SGX_EADD = 0x01, __SGX_EINIT = 0x02, __SGX_EREMOVE = 0x03, __SGX_EDBGRD = 0x04, __SGX_EDBGWR = 0x05, __SGX_EEXTEND = 0x06, __SGX_ELDB = 0x07, __SGX_ELDU = 0x08, __SGX_EBLOCK = 0x09, __SGX_EPA = 0x0A, __SGX_EWB = 0x0B, __SGX_ETRACK = 0x0C, __SGX_EAUG = 0x0D, __SGX_EMODPR = 0x0E, __SGX_EMODT = 0x0F, __SGX_ERDINFO = 0x10, __SGX_ETRACKC = 0x11, __SGX_ELDBC = 0x12, __SGX_ELDUC = 0x13 }; enum __encls_type __T = (enum __encls_type)__L; unsigned int __R = 0; if (!__builtin_constant_p(__T)) __encls_generic(__L, __D[0], __D[1], __D[2], __R); else switch (__T) { case __SGX_ECREATE: case __SGX_EADD: case __SGX_EDBGWR: case __SGX_EEXTEND: case __SGX_EPA: case __SGX_EMODPR: case __SGX_EMODT: case __SGX_EAUG: case __SGX_ERDINFO: __encls_bc(__L, __D[0], __D[1], __R); break; case __SGX_EINIT: case __SGX_ELDB: case __SGX_ELDU: case __SGX_EWB: case __SGX_ELDBC: case __SGX_ELDUC: __encls_bcd(__L, __D[0], __D[1], __D[2], __R); break; case __SGX_EREMOVE: case __SGX_EBLOCK: case __SGX_ETRACK: case __SGX_ETRACKC: __encls_c(__L, __D[1], __R); break; case __SGX_EDBGRD: __encls_edbgrd(__L, __D[0], __D[1], __R); break; default: __encls_generic(__L, __D[0], __D[1], __D[2], __R); } return __R; } __funline unsigned int _enclu_u32(const unsigned int __L, size_t __D[]) { enum __enclu_type { __SGX_EREPORT = 0x00, __SGX_EGETKEY = 0x01, __SGX_EENTER = 0x02, __SGX_ERESUME = 0x03, __SGX_EEXIT = 0x04, __SGX_EACCEPT 
= 0x05, __SGX_EMODPE = 0x06, __SGX_EACCEPTCOPY = 0x07 }; enum __enclu_type __T = (enum __enclu_type)__L; unsigned int __R = 0; if (!__builtin_constant_p(__T)) __enclu_generic(__L, __D[0], __D[1], __D[2], __R); else switch (__T) { case __SGX_EREPORT: case __SGX_EACCEPTCOPY: __enclu_bcd(__L, __D[0], __D[1], __D[2], __R); break; case __SGX_EGETKEY: case __SGX_ERESUME: case __SGX_EACCEPT: case __SGX_EMODPE: __enclu_bc(__L, __D[0], __D[1], __R); break; case __SGX_EENTER: __enclu_eenter(__L, __D[0], __D[1], __R); break; case __SGX_EEXIT: __enclu_eexit(__L, __D[0], __D[1], __R); break; default: __enclu_generic(__L, __D[0], __D[1], __D[2], __R); } return __R; } __funline unsigned int _enclv_u32(const unsigned int __L, size_t __D[]) { enum __enclv_type { __SGX_EDECVIRTCHILD = 0x00, __SGX_EINCVIRTCHILD = 0x01, __SGX_ESETCONTEXT = 0x02 }; unsigned int __R = 0; if (!__builtin_constant_p(__L)) __enclv_generic(__L, __D[0], __D[1], __D[2], __R); else switch (__L) { case __SGX_EDECVIRTCHILD: case __SGX_EINCVIRTCHILD: __enclv_bc(__L, __D[0], __D[1], __R); break; case __SGX_ESETCONTEXT: __enclv_cd(__L, __D[1], __D[2], __R); break; default: __enclv_generic(__L, __D[0], __D[1], __D[2], __R); } return __R; } #ifdef __DISABLE_SGX__ #undef __DISABLE_SGX__ #pragma GCC pop_options #endif /* __DISABLE_SGX__ */ #endif /* __x86_64__ */ #endif /* _SGXINTRIN_H_INCLUDED */
6,809
216
jart/cosmopolitan
false
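Usage sketch (not part of the header above): each wrapper decays to a single ENCLS/ENCLU/ENCLV instruction, so a caller only fills the size_t triple that maps to %rbx/%rcx/%rdx. The helper below is hypothetical and assumes ring-0 execution on SGX hardware, since ENCLS faults in user mode.

/* Hypothetical caller: issue the ECREATE leaf (0x00) through _encls_u32.
   __D[0] -> %rbx (PAGEINFO), __D[1] -> %rcx (EPC page), __D[2] unused;
   the page pointers here are illustrative placeholders from a driver. */
static unsigned int my_sgx_ecreate(void *pageinfo, void *epc_page) {
  size_t d[3] = {(size_t)pageinfo, (size_t)epc_page, 0};
  return _encls_u32(0x00, d); /* returns whatever ENCLS leaves in %eax */
}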
cosmopolitan/third_party/intel/cpuid.internal.h
#ifndef COSMOPOLITAN_THIRD_PARTY_INTEL_CPUID_INTERNAL_H_
#define COSMOPOLITAN_THIRD_PARTY_INTEL_CPUID_INTERNAL_H_
#ifdef __x86_64__
#if !(__ASSEMBLER__ + __LINKER__ + 0)

#define bit_SSE3 (1 << 0)
#define bit_PCLMUL (1 << 1)
#define bit_LZCNT (1 << 5)
#define bit_SSSE3 (1 << 9)
#define bit_FMA (1 << 12)
#define bit_CMPXCHG16B (1 << 13)
#define bit_SSE4_1 (1 << 19)
#define bit_SSE4_2 (1 << 20)
#define bit_MOVBE (1 << 22)
#define bit_POPCNT (1 << 23)
#define bit_AES (1 << 25)
#define bit_XSAVE (1 << 26)
#define bit_OSXSAVE (1 << 27)
#define bit_AVX (1 << 28)
#define bit_F16C (1 << 29)
#define bit_RDRND (1 << 30)

#define bit_CMPXCHG8B (1 << 8)
#define bit_CMOV (1 << 15)
#define bit_MMX (1 << 23)
#define bit_FXSAVE (1 << 24)
#define bit_SSE (1 << 25)
#define bit_SSE2 (1 << 26)

#define bit_LAHF_LM (1 << 0)
#define bit_ABM (1 << 5)
#define bit_SSE4a (1 << 6)
#define bit_PRFCHW (1 << 8)
#define bit_XOP (1 << 11)
#define bit_LWP (1 << 15)
#define bit_FMA4 (1 << 16)
#define bit_TBM (1 << 21)
#define bit_MWAITX (1 << 29)

#define bit_MMXEXT (1 << 22)
#define bit_LM (1 << 29)
#define bit_3DNOWP (1 << 30)
#define bit_3DNOW (1u << 31)

#define bit_CLZERO (1 << 0)
#define bit_WBNOINVD (1 << 9)

#define bit_FSGSBASE (1 << 0)
#define bit_SGX (1 << 2)
#define bit_BMI (1 << 3)
#define bit_HLE (1 << 4)
#define bit_AVX2 (1 << 5)
#define bit_BMI2 (1 << 8)
#define bit_RTM (1 << 11)
#define bit_MPX (1 << 14)
#define bit_AVX512F (1 << 16)
#define bit_AVX512DQ (1 << 17)
#define bit_RDSEED (1 << 18)
#define bit_ADX (1 << 19)
#define bit_AVX512IFMA (1 << 21)
#define bit_CLFLUSHOPT (1 << 23)
#define bit_CLWB (1 << 24)
#define bit_AVX512PF (1 << 26)
#define bit_AVX512ER (1 << 27)
#define bit_AVX512CD (1 << 28)
#define bit_SHA (1 << 29)
#define bit_AVX512BW (1 << 30)
#define bit_AVX512VL (1u << 31)

#define bit_PREFETCHWT1 (1 << 0)
#define bit_AVX512VBMI (1 << 1)
#define bit_PKU (1 << 3)
#define bit_OSPKE (1 << 4)
#define bit_WAITPKG (1 << 5)
#define bit_AVX512VBMI2 (1 << 6)
#define bit_SHSTK (1 << 7)
#define bit_GFNI (1 << 8)
#define bit_VAES (1 << 9)
#define bit_AVX512VNNI (1 << 11)
#define bit_VPCLMULQDQ (1 << 10)
#define bit_AVX512BITALG (1 << 12)
#define bit_AVX512VPOPCNTDQ (1 << 14)
#define bit_RDPID (1 << 22)
#define bit_MOVDIRI (1 << 27)
#define bit_MOVDIR64B (1 << 28)
#define bit_CLDEMOTE (1 << 25)

#define bit_AVX5124VNNIW (1 << 2)
#define bit_AVX5124FMAPS (1 << 3)

#define bit_IBT (1 << 20)
#define bit_PCONFIG (1 << 18)

#define bit_BNDREGS (1 << 3)
#define bit_BNDCSR (1 << 4)

#define bit_XSAVEOPT (1 << 0)
#define bit_XSAVEC (1 << 1)
#define bit_XSAVES (1 << 3)
#define bit_PTWRITE (1 << 4)

#define signature_AMD_ebx 0x68747541
#define signature_AMD_ecx 0x444d4163
#define signature_AMD_edx 0x69746e65
#define signature_CENTAUR_ebx 0x746e6543
#define signature_CENTAUR_ecx 0x736c7561
#define signature_CENTAUR_edx 0x48727561
#define signature_CYRIX_ebx 0x69727943
#define signature_CYRIX_ecx 0x64616574
#define signature_CYRIX_edx 0x736e4978
#define signature_INTEL_ebx 0x756e6547
#define signature_INTEL_ecx 0x6c65746e
#define signature_INTEL_edx 0x49656e69
#define signature_TM1_ebx 0x6e617254
#define signature_TM1_ecx 0x55504361
#define signature_TM1_edx 0x74656d73
#define signature_TM2_ebx 0x756e6547
#define signature_TM2_ecx 0x3638784d
#define signature_TM2_edx 0x54656e69
#define signature_NSC_ebx 0x646f6547
#define signature_NSC_ecx 0x43534e20
#define signature_NSC_edx 0x79622065
#define signature_NEXGEN_ebx 0x4778654e
#define signature_NEXGEN_ecx 0x6e657669
#define signature_NEXGEN_edx 0x72446e65
#define signature_RISE_ebx 0x65736952
#define signature_RISE_ecx 0x65736952
#define signature_RISE_edx 0x65736952
#define signature_SIS_ebx 0x20536953
#define signature_SIS_ecx 0x20536953
#define signature_SIS_edx 0x20536953
#define signature_UMC_ebx 0x20434d55
#define signature_UMC_ecx 0x20434d55
#define signature_UMC_edx 0x20434d55
#define signature_VIA_ebx 0x20414956
#define signature_VIA_ecx 0x20414956
#define signature_VIA_edx 0x20414956
#define signature_VORTEX_ebx 0x74726f56
#define signature_VORTEX_ecx 0x436f5320
#define signature_VORTEX_edx 0x36387865

#ifndef __x86_64__
#define __cpuid(level, a, b, c, d)                                            \
  do {                                                                        \
    if (__builtin_constant_p(level) && (level) != 1)                          \
      __asm__("cpuid\n\t" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "0"(level)); \
    else                                                                      \
      __asm__("cpuid\n\t"                                                     \
              : "=a"(a), "=b"(b), "=c"(c), "=d"(d)                            \
              : "0"(level), "1"(0), "2"(0));                                  \
  } while (0)
#else
#define __cpuid(level, a, b, c, d) \
  __asm__("cpuid\n\t" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "0"(level))
#endif

#define __cpuid_count(level, count, a, b, c, d) \
  __asm__("cpuid\n\t"                           \
          : "=a"(a), "=b"(b), "=c"(c), "=d"(d)  \
          : "0"(level), "2"(count))

static __inline unsigned int __get_cpuid_max(unsigned int __ext,
                                             unsigned int *__sig) {
  unsigned int __eax, __ebx, __ecx, __edx;
#ifndef __x86_64__
#if __GNUC__ >= 3
  __asm__("pushf{l|d}\n\t"
          "pushf{l|d}\n\t"
          "pop{l}\t%0\n\t"
          "mov{l}\t{%0, %1|%1, %0}\n\t"
          "xor{l}\t{%2, %0|%0, %2}\n\t"
          "push{l}\t%0\n\t"
          "popf{l|d}\n\t"
          "pushf{l|d}\n\t"
          "pop{l}\t%0\n\t"
          "popf{l|d}\n\t"
          : "=&r"(__eax), "=&r"(__ebx)
          : "i"(0x00200000));
#else
  __asm__("pushfl\n\t"
          "pushfl\n\t"
          "popl\t%0\n\t"
          "movl\t%0, %1\n\t"
          "xorl\t%2, %0\n\t"
          "pushl\t%0\n\t"
          "popfl\n\t"
          "pushfl\n\t"
          "popl\t%0\n\t"
          "popfl\n\t"
          : "=&r"(__eax), "=&r"(__ebx)
          : "i"(0x00200000));
#endif
  if (!((__eax ^ __ebx) & 0x00200000)) return 0;
#endif
  __cpuid(__ext, __eax, __ebx, __ecx, __edx);
  if (__sig) *__sig = __ebx;
  return __eax;
}

static __inline int __get_cpuid(unsigned int __leaf, unsigned int *__eax,
                                unsigned int *__ebx, unsigned int *__ecx,
                                unsigned int *__edx) {
  unsigned int __ext = __leaf & 0x80000000;
  unsigned int __maxlevel = __get_cpuid_max(__ext, 0);
  if (__maxlevel == 0 || __maxlevel < __leaf) return 0;
  __cpuid(__leaf, *__eax, *__ebx, *__ecx, *__edx);
  return 1;
}

static __inline int __get_cpuid_count(unsigned int __leaf,
                                      unsigned int __subleaf,
                                      unsigned int *__eax, unsigned int *__ebx,
                                      unsigned int *__ecx,
                                      unsigned int *__edx) {
  unsigned int __ext = __leaf & 0x80000000;
  unsigned int __maxlevel = __get_cpuid_max(__ext, 0);
  if (__maxlevel == 0 || __maxlevel < __leaf) return 0;
  __cpuid_count(__leaf, __subleaf, *__eax, *__ebx, *__ecx, *__edx);
  return 1;
}

#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* __x86_64__ */
#endif /* COSMOPOLITAN_THIRD_PARTY_INTEL_CPUID_INTERNAL_H_ */
7,638
238
jart/cosmopolitan
false
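Usage sketch (not part of the header above): with __get_cpuid_count the feature bits become one-line probes; AVX2 is bit_AVX2 of %ebx in leaf 7, subleaf 0. The helper name is illustrative.

/* Returns nonzero when CPUID leaf 7 reports AVX2 support. */
static int have_avx2(void) {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) return 0;
  return !!(ebx & bit_AVX2);
}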
cosmopolitan/third_party/intel/avx512vbmi2vlintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED
#error \
    "Never use <avx512vbmi2vlintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef _AVX512VBMI2VLINTRIN_H_INCLUDED
#define _AVX512VBMI2VLINTRIN_H_INCLUDED

#if !defined(__AVX512VL__) || !defined(__AVX512VBMI2__)
#pragma GCC push_options
#pragma GCC target("avx512vbmi2,avx512vl")
#define __DISABLE_AVX512VBMI2VL__
#endif /* __AVX512VBMIVL__ */

__funline __m128i _mm_mask_compress_epi8(__m128i __A, __mmask16 __B,
                                         __m128i __C) {
  return (__m128i)__builtin_ia32_compressqi128_mask((__v16qi)__C, (__v16qi)__A,
                                                    (__mmask16)__B);
}
__funline __m128i _mm_maskz_compress_epi8(__mmask16 __A, __m128i __B) {
  return (__m128i)__builtin_ia32_compressqi128_mask(
      (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__A);
}
__funline void _mm256_mask_compressstoreu_epi16(void *__A, __mmask16 __B,
                                                __m256i __C) {
  __builtin_ia32_compressstoreuhi256_mask((__v16hi *)__A, (__v16hi)__C,
                                          (__mmask16)__B);
}
__funline __m128i _mm_mask_compress_epi16(__m128i __A, __mmask8 __B,
                                          __m128i __C) {
  return (__m128i)__builtin_ia32_compresshi128_mask((__v8hi)__C, (__v8hi)__A,
                                                    (__mmask8)__B);
}
__funline __m128i _mm_maskz_compress_epi16(__mmask8 __A, __m128i __B) {
  return (__m128i)__builtin_ia32_compresshi128_mask(
      (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__A);
}
__funline __m256i _mm256_mask_compress_epi16(__m256i __A, __mmask16 __B,
                                             __m256i __C) {
  return (__m256i)__builtin_ia32_compresshi256_mask((__v16hi)__C, (__v16hi)__A,
                                                    (__mmask16)__B);
}
__funline __m256i _mm256_maskz_compress_epi16(__mmask16 __A, __m256i __B) {
  return (__m256i)__builtin_ia32_compresshi256_mask(
      (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__A);
}
__funline void _mm_mask_compressstoreu_epi8(void *__A, __mmask16 __B,
                                            __m128i __C) {
  __builtin_ia32_compressstoreuqi128_mask((__v16qi *)__A, (__v16qi)__C,
                                          (__mmask16)__B);
}
__funline void _mm_mask_compressstoreu_epi16(void *__A, __mmask8 __B,
                                             __m128i __C) {
  __builtin_ia32_compressstoreuhi128_mask((__v8hi *)__A, (__v8hi)__C,
                                          (__mmask8)__B);
}
__funline __m128i _mm_mask_expand_epi8(__m128i __A, __mmask16 __B, __m128i __C) {
  return (__m128i)__builtin_ia32_expandqi128_mask((__v16qi)__C, (__v16qi)__A,
                                                  (__mmask16)__B);
}
__funline __m128i _mm_maskz_expand_epi8(__mmask16 __A, __m128i __B) {
  return (__m128i)__builtin_ia32_expandqi128_maskz(
      (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__A);
}
__funline __m128i _mm_mask_expandloadu_epi8(__m128i __A, __mmask16 __B,
                                            const void *__C) {
  return (__m128i)__builtin_ia32_expandloadqi128_mask(
      (const __v16qi *)__C, (__v16qi)__A, (__mmask16)__B);
}
__funline __m128i _mm_maskz_expandloadu_epi8(__mmask16 __A, const void *__B) {
  return (__m128i)__builtin_ia32_expandloadqi128_maskz(
      (const __v16qi *)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__A);
}
__funline __m128i _mm_mask_expand_epi16(__m128i __A, __mmask8 __B, __m128i __C) {
  return (__m128i)__builtin_ia32_expandhi128_mask((__v8hi)__C, (__v8hi)__A,
                                                  (__mmask8)__B);
}
__funline __m128i _mm_maskz_expand_epi16(__mmask8 __A, __m128i __B) {
  return (__m128i)__builtin_ia32_expandhi128_maskz(
      (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__A);
}
__funline __m128i _mm_mask_expandloadu_epi16(__m128i __A, __mmask8 __B,
                                             const void *__C) {
  return (__m128i)__builtin_ia32_expandloadhi128_mask(
      (const __v8hi *)__C, (__v8hi)__A, (__mmask8)__B);
}
__funline __m128i _mm_maskz_expandloadu_epi16(__mmask8 __A, const void *__B) {
  return (__m128i)__builtin_ia32_expandloadhi128_maskz(
      (const __v8hi *)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__A);
}
__funline __m256i _mm256_mask_expand_epi16(__m256i __A, __mmask16 __B,
                                           __m256i __C) {
  return (__m256i)__builtin_ia32_expandhi256_mask((__v16hi)__C, (__v16hi)__A,
                                                  (__mmask16)__B);
}
__funline __m256i _mm256_maskz_expand_epi16(__mmask16 __A, __m256i __B) {
  return (__m256i)__builtin_ia32_expandhi256_maskz(
      (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__A);
}
__funline __m256i _mm256_mask_expandloadu_epi16(__m256i __A, __mmask16 __B,
                                                const void *__C) {
  return (__m256i)__builtin_ia32_expandloadhi256_mask(
      (const __v16hi *)__C, (__v16hi)__A, (__mmask16)__B);
}
__funline __m256i _mm256_maskz_expandloadu_epi16(__mmask16 __A,
                                                 const void *__B) {
  return (__m256i)__builtin_ia32_expandloadhi256_maskz(
      (const __v16hi *)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__A);
}

#ifdef __OPTIMIZE__
__funline __m256i _mm256_shrdi_epi16(__m256i __A, __m256i __B, int __C) {
  return (__m256i)__builtin_ia32_vpshrd_v16hi((__v16hi)__A, (__v16hi)__B, __C);
}
__funline __m256i _mm256_mask_shrdi_epi16(__m256i __A, __mmask16 __B,
                                          __m256i __C, __m256i __D, int __E) {
  return (__m256i)__builtin_ia32_vpshrd_v16hi_mask(
      (__v16hi)__C, (__v16hi)__D, __E, (__v16hi)__A, (__mmask16)__B);
}
__funline __m256i _mm256_maskz_shrdi_epi16(__mmask16 __A, __m256i __B,
                                           __m256i __C, int __D) {
  return (__m256i)__builtin_ia32_vpshrd_v16hi_mask(
      (__v16hi)__B, (__v16hi)__C, __D, (__v16hi)_mm256_setzero_si256(),
      (__mmask16)__A);
}
__funline __m256i _mm256_mask_shrdi_epi32(__m256i __A, __mmask8 __B,
                                          __m256i __C, __m256i __D, int __E) {
  return (__m256i)__builtin_ia32_vpshrd_v8si_mask((__v8si)__C, (__v8si)__D, __E,
                                                  (__v8si)__A, (__mmask8)__B);
}
__funline __m256i _mm256_maskz_shrdi_epi32(__mmask8 __A, __m256i __B,
                                           __m256i __C, int __D) {
  return (__m256i)__builtin_ia32_vpshrd_v8si_mask(
      (__v8si)__B, (__v8si)__C, __D, (__v8si)_mm256_setzero_si256(),
      (__mmask8)__A);
}
__funline __m256i _mm256_shrdi_epi32(__m256i __A, __m256i __B, int __C) {
  return (__m256i)__builtin_ia32_vpshrd_v8si((__v8si)__A, (__v8si)__B, __C);
}
__funline __m256i _mm256_mask_shrdi_epi64(__m256i __A, __mmask8 __B,
                                          __m256i __C, __m256i __D, int __E) {
  return (__m256i)__builtin_ia32_vpshrd_v4di_mask((__v4di)__C, (__v4di)__D, __E,
                                                  (__v4di)__A, (__mmask8)__B);
}
__funline __m256i _mm256_maskz_shrdi_epi64(__mmask8 __A, __m256i __B,
                                           __m256i __C, int __D) {
  return (__m256i)__builtin_ia32_vpshrd_v4di_mask(
      (__v4di)__B, (__v4di)__C, __D, (__v4di)_mm256_setzero_si256(),
      (__mmask8)__A);
}
__funline __m256i _mm256_shrdi_epi64(__m256i __A, __m256i __B, int __C) {
  return (__m256i)__builtin_ia32_vpshrd_v4di((__v4di)__A, (__v4di)__B, __C);
}
__funline __m128i _mm_mask_shrdi_epi16(__m128i __A, __mmask8 __B, __m128i __C,
                                       __m128i __D, int __E) {
  return (__m128i)__builtin_ia32_vpshrd_v8hi_mask((__v8hi)__C, (__v8hi)__D, __E,
                                                  (__v8hi)__A, (__mmask8)__B);
}
__funline __m128i _mm_maskz_shrdi_epi16(__mmask8 __A, __m128i __B, __m128i __C,
                                        int __D) {
  return (__m128i)__builtin_ia32_vpshrd_v8hi_mask((__v8hi)__B, (__v8hi)__C, __D,
                                                  (__v8hi)_mm_setzero_si128(),
                                                  (__mmask8)__A);
}
__funline __m128i _mm_shrdi_epi16(__m128i __A, __m128i __B, int __C) {
  return (__m128i)__builtin_ia32_vpshrd_v8hi((__v8hi)__A, (__v8hi)__B, __C);
}
__funline __m128i _mm_mask_shrdi_epi32(__m128i __A, __mmask8 __B, __m128i __C,
                                       __m128i __D, int __E) {
  return (__m128i)__builtin_ia32_vpshrd_v4si_mask((__v4si)__C, (__v4si)__D, __E,
                                                  (__v4si)__A, (__mmask8)__B);
}
__funline __m128i _mm_maskz_shrdi_epi32(__mmask8 __A, __m128i __B, __m128i __C,
                                        int __D) {
  return (__m128i)__builtin_ia32_vpshrd_v4si_mask((__v4si)__B, (__v4si)__C, __D,
                                                  (__v4si)_mm_setzero_si128(),
                                                  (__mmask8)__A);
}
__funline __m128i _mm_shrdi_epi32(__m128i __A, __m128i __B, int __C) {
  return (__m128i)__builtin_ia32_vpshrd_v4si((__v4si)__A, (__v4si)__B, __C);
}
__funline __m128i _mm_mask_shrdi_epi64(__m128i __A, __mmask8 __B, __m128i __C,
                                       __m128i __D, int __E) {
  return (__m128i)__builtin_ia32_vpshrd_v2di_mask((__v2di)__C, (__v2di)__D, __E,
                                                  (__v2di)__A, (__mmask8)__B);
}
__funline __m128i _mm_maskz_shrdi_epi64(__mmask8 __A, __m128i __B, __m128i __C,
                                        int __D) {
  return (__m128i)__builtin_ia32_vpshrd_v2di_mask((__v2di)__B, (__v2di)__C, __D,
                                                  (__v2di)_mm_setzero_si128(),
                                                  (__mmask8)__A);
}
__funline __m128i _mm_shrdi_epi64(__m128i __A, __m128i __B, int __C) {
  return (__m128i)__builtin_ia32_vpshrd_v2di((__v2di)__A, (__v2di)__B, __C);
}
__funline __m256i _mm256_shldi_epi16(__m256i __A, __m256i __B, int __C) {
  return (__m256i)__builtin_ia32_vpshld_v16hi((__v16hi)__A, (__v16hi)__B, __C);
}
__funline __m256i _mm256_mask_shldi_epi16(__m256i __A, __mmask16 __B,
                                          __m256i __C, __m256i __D, int __E) {
  return (__m256i)__builtin_ia32_vpshld_v16hi_mask(
      (__v16hi)__C, (__v16hi)__D, __E, (__v16hi)__A, (__mmask16)__B);
}
__funline __m256i _mm256_maskz_shldi_epi16(__mmask16 __A, __m256i __B,
                                           __m256i __C, int __D) {
  return (__m256i)__builtin_ia32_vpshld_v16hi_mask(
      (__v16hi)__B, (__v16hi)__C, __D, (__v16hi)_mm256_setzero_si256(),
      (__mmask16)__A);
}
__funline __m256i _mm256_mask_shldi_epi32(__m256i __A, __mmask8 __B,
                                          __m256i __C, __m256i __D, int __E) {
  return (__m256i)__builtin_ia32_vpshld_v8si_mask((__v8si)__C, (__v8si)__D, __E,
                                                  (__v8si)__A, (__mmask8)__B);
}
__funline __m256i _mm256_maskz_shldi_epi32(__mmask8 __A, __m256i __B,
                                           __m256i __C, int __D) {
  return (__m256i)__builtin_ia32_vpshld_v8si_mask(
      (__v8si)__B, (__v8si)__C, __D, (__v8si)_mm256_setzero_si256(),
      (__mmask8)__A);
}
__funline __m256i _mm256_shldi_epi32(__m256i __A, __m256i __B, int __C) {
  return (__m256i)__builtin_ia32_vpshld_v8si((__v8si)__A, (__v8si)__B, __C);
}
__funline __m256i _mm256_mask_shldi_epi64(__m256i __A, __mmask8 __B,
                                          __m256i __C, __m256i __D, int __E) {
  return (__m256i)__builtin_ia32_vpshld_v4di_mask((__v4di)__C, (__v4di)__D, __E,
                                                  (__v4di)__A, (__mmask8)__B);
}
__funline __m256i _mm256_maskz_shldi_epi64(__mmask8 __A, __m256i __B,
                                           __m256i __C, int __D) {
  return (__m256i)__builtin_ia32_vpshld_v4di_mask(
      (__v4di)__B, (__v4di)__C, __D, (__v4di)_mm256_setzero_si256(),
      (__mmask8)__A);
}
__funline __m256i _mm256_shldi_epi64(__m256i __A, __m256i __B, int __C) {
  return (__m256i)__builtin_ia32_vpshld_v4di((__v4di)__A, (__v4di)__B, __C);
}
__funline __m128i _mm_mask_shldi_epi16(__m128i __A, __mmask8 __B, __m128i __C,
                                       __m128i __D, int __E) {
  return (__m128i)__builtin_ia32_vpshld_v8hi_mask((__v8hi)__C, (__v8hi)__D, __E,
                                                  (__v8hi)__A, (__mmask8)__B);
}
__funline __m128i _mm_maskz_shldi_epi16(__mmask8 __A, __m128i __B, __m128i __C,
                                        int __D) {
  return (__m128i)__builtin_ia32_vpshld_v8hi_mask((__v8hi)__B, (__v8hi)__C, __D,
                                                  (__v8hi)_mm_setzero_si128(),
                                                  (__mmask8)__A);
}
__funline __m128i _mm_shldi_epi16(__m128i __A, __m128i __B, int __C) {
  return (__m128i)__builtin_ia32_vpshld_v8hi((__v8hi)__A, (__v8hi)__B, __C);
}
__funline __m128i _mm_mask_shldi_epi32(__m128i __A, __mmask8 __B, __m128i __C,
                                       __m128i __D, int __E) {
  return (__m128i)__builtin_ia32_vpshld_v4si_mask((__v4si)__C, (__v4si)__D, __E,
                                                  (__v4si)__A, (__mmask8)__B);
}
__funline __m128i _mm_maskz_shldi_epi32(__mmask8 __A, __m128i __B, __m128i __C,
                                        int __D) {
  return (__m128i)__builtin_ia32_vpshld_v4si_mask((__v4si)__B, (__v4si)__C, __D,
                                                  (__v4si)_mm_setzero_si128(),
                                                  (__mmask8)__A);
}
__funline __m128i _mm_shldi_epi32(__m128i __A, __m128i __B, int __C) {
  return (__m128i)__builtin_ia32_vpshld_v4si((__v4si)__A, (__v4si)__B, __C);
}
__funline __m128i _mm_mask_shldi_epi64(__m128i __A, __mmask8 __B, __m128i __C,
                                       __m128i __D, int __E) {
  return (__m128i)__builtin_ia32_vpshld_v2di_mask((__v2di)__C, (__v2di)__D, __E,
                                                  (__v2di)__A, (__mmask8)__B);
}
__funline __m128i _mm_maskz_shldi_epi64(__mmask8 __A, __m128i __B, __m128i __C,
                                        int __D) {
  return (__m128i)__builtin_ia32_vpshld_v2di_mask((__v2di)__B, (__v2di)__C, __D,
                                                  (__v2di)_mm_setzero_si128(),
                                                  (__mmask8)__A);
}
__funline __m128i _mm_shldi_epi64(__m128i __A, __m128i __B, int __C) {
  return (__m128i)__builtin_ia32_vpshld_v2di((__v2di)__A, (__v2di)__B, __C);
}
#else
#define _mm256_shrdi_epi16(A, B, C) \
  ((__m256i) __builtin_ia32_vpshrd_v16hi ((__v16hi)(__m256i)(A), \
                                          (__v16hi)(__m256i)(B), (int)(C)))
#define _mm256_mask_shrdi_epi16(A, B, C, D, E) \
  ((__m256i) __builtin_ia32_vpshrd_v16hi_mask ((__v16hi)(__m256i)(C), \
      (__v16hi)(__m256i)(D), (int)(E), (__v16hi)(__m256i)(A), (__mmask16)(B)))
#define _mm256_maskz_shrdi_epi16(A, B, C, D) \
  ((__m256i) __builtin_ia32_vpshrd_v16hi_mask ((__v16hi)(__m256i)(B), \
      (__v16hi)(__m256i)(C), (int)(D), \
      (__v16hi)(__m256i)_mm256_setzero_si256 (), (__mmask16)(A)))
#define _mm256_shrdi_epi32(A, B, C) \
  ((__m256i) __builtin_ia32_vpshrd_v8si ((__v8si)(__m256i)(A), \
                                         (__v8si)(__m256i)(B), (int)(C)))
#define _mm256_mask_shrdi_epi32(A, B, C, D, E) \
  ((__m256i) __builtin_ia32_vpshrd_v8si_mask ((__v8si)(__m256i)(C), \
      (__v8si)(__m256i)(D), (int)(E), (__v8si)(__m256i)(A), (__mmask8)(B)))
#define _mm256_maskz_shrdi_epi32(A, B, C, D) \
  ((__m256i) __builtin_ia32_vpshrd_v8si_mask ((__v8si)(__m256i)(B), \
      (__v8si)(__m256i)(C), (int)(D), \
      (__v8si)(__m256i)_mm256_setzero_si256 (), (__mmask8)(A)))
#define _mm256_shrdi_epi64(A, B, C) \
  ((__m256i) __builtin_ia32_vpshrd_v4di ((__v4di)(__m256i)(A), \
                                         (__v4di)(__m256i)(B), (int)(C)))
#define _mm256_mask_shrdi_epi64(A, B, C, D, E) \
  ((__m256i) __builtin_ia32_vpshrd_v4di_mask ((__v4di)(__m256i)(C), \
      (__v4di)(__m256i)(D), (int)(E), (__v4di)(__m256i)(A), (__mmask8)(B)))
#define _mm256_maskz_shrdi_epi64(A, B, C, D) \
  ((__m256i) __builtin_ia32_vpshrd_v4di_mask ((__v4di)(__m256i)(B), \
      (__v4di)(__m256i)(C), (int)(D), \
      (__v4di)(__m256i)_mm256_setzero_si256 (), (__mmask8)(A)))
#define _mm_shrdi_epi16(A, B, C) \
  ((__m128i) __builtin_ia32_vpshrd_v8hi ((__v8hi)(__m128i)(A), \
                                         (__v8hi)(__m128i)(B), (int)(C)))
#define _mm_mask_shrdi_epi16(A, B, C, D, E) \
  ((__m128i) __builtin_ia32_vpshrd_v8hi_mask ((__v8hi)(__m128i)(C), \
      (__v8hi)(__m128i)(D), (int)(E), (__v8hi)(__m128i)(A), (__mmask8)(B)))
#define _mm_maskz_shrdi_epi16(A, B, C, D) \
  ((__m128i) __builtin_ia32_vpshrd_v8hi_mask ((__v8hi)(__m128i)(B), \
      (__v8hi)(__m128i)(C), (int)(D), \
      (__v8hi)(__m128i)_mm_setzero_si128 (), (__mmask8)(A)))
#define _mm_shrdi_epi32(A, B, C) \
  ((__m128i) __builtin_ia32_vpshrd_v4si ((__v4si)(__m128i)(A), \
                                         (__v4si)(__m128i)(B), (int)(C)))
#define _mm_mask_shrdi_epi32(A, B, C, D, E) \
  ((__m128i) __builtin_ia32_vpshrd_v4si_mask ((__v4si)(__m128i)(C), \
      (__v4si)(__m128i)(D), (int)(E), (__v4si)(__m128i)(A), (__mmask8)(B)))
#define _mm_maskz_shrdi_epi32(A, B, C, D) \
  ((__m128i) __builtin_ia32_vpshrd_v4si_mask ((__v4si)(__m128i)(B), \
      (__v4si)(__m128i)(C), (int)(D), \
      (__v4si)(__m128i)_mm_setzero_si128 (), (__mmask8)(A)))
#define _mm_shrdi_epi64(A, B, C) \
  ((__m128i) __builtin_ia32_vpshrd_v2di ((__v2di)(__m128i)(A), \
                                         (__v2di)(__m128i)(B), (int)(C)))
#define _mm_mask_shrdi_epi64(A, B, C, D, E) \
  ((__m128i) __builtin_ia32_vpshrd_v2di_mask ((__v2di)(__m128i)(C), \
      (__v2di)(__m128i)(D), (int)(E), (__v2di)(__m128i)(A), (__mmask8)(B)))
#define _mm_maskz_shrdi_epi64(A, B, C, D) \
  ((__m128i) __builtin_ia32_vpshrd_v2di_mask ((__v2di)(__m128i)(B), \
      (__v2di)(__m128i)(C), (int)(D), \
      (__v2di)(__m128i)_mm_setzero_si128 (), (__mmask8)(A)))
#define _mm256_shldi_epi16(A, B, C) \
  ((__m256i) __builtin_ia32_vpshld_v16hi ((__v16hi)(__m256i)(A), \
                                          (__v16hi)(__m256i)(B), (int)(C)))
#define _mm256_mask_shldi_epi16(A, B, C, D, E) \
  ((__m256i) __builtin_ia32_vpshld_v16hi_mask ((__v16hi)(__m256i)(C), \
      (__v16hi)(__m256i)(D), (int)(E), (__v16hi)(__m256i)(A), (__mmask16)(B)))
#define _mm256_maskz_shldi_epi16(A, B, C, D) \
  ((__m256i) __builtin_ia32_vpshld_v16hi_mask ((__v16hi)(__m256i)(B), \
      (__v16hi)(__m256i)(C), (int)(D), \
      (__v16hi)(__m256i)_mm256_setzero_si256 (), (__mmask16)(A)))
#define _mm256_shldi_epi32(A, B, C) \
  ((__m256i) __builtin_ia32_vpshld_v8si ((__v8si)(__m256i)(A), \
                                         (__v8si)(__m256i)(B), (int)(C)))
#define _mm256_mask_shldi_epi32(A, B, C, D, E) \
  ((__m256i) __builtin_ia32_vpshld_v8si_mask ((__v8si)(__m256i)(C), \
      (__v8si)(__m256i)(D), (int)(E), (__v8si)(__m256i)(A), (__mmask8)(B)))
#define _mm256_maskz_shldi_epi32(A, B, C, D) \
  ((__m256i) __builtin_ia32_vpshld_v8si_mask ((__v8si)(__m256i)(B), \
      (__v8si)(__m256i)(C), (int)(D), \
      (__v8si)(__m256i)_mm256_setzero_si256 (), (__mmask8)(A)))
#define _mm256_shldi_epi64(A, B, C) \
  ((__m256i) __builtin_ia32_vpshld_v4di ((__v4di)(__m256i)(A), \
                                         (__v4di)(__m256i)(B), (int)(C)))
#define _mm256_mask_shldi_epi64(A, B, C, D, E) \
  ((__m256i) __builtin_ia32_vpshld_v4di_mask ((__v4di)(__m256i)(C), \
      (__v4di)(__m256i)(D), (int)(E), (__v4di)(__m256i)(A), (__mmask8)(B)))
#define _mm256_maskz_shldi_epi64(A, B, C, D) \
  ((__m256i) __builtin_ia32_vpshld_v4di_mask ((__v4di)(__m256i)(B), \
      (__v4di)(__m256i)(C), (int)(D), \
      (__v4di)(__m256i)_mm256_setzero_si256 (), (__mmask8)(A)))
#define _mm_shldi_epi16(A, B, C) \
  ((__m128i) __builtin_ia32_vpshld_v8hi ((__v8hi)(__m128i)(A), \
                                         (__v8hi)(__m128i)(B), (int)(C)))
#define _mm_mask_shldi_epi16(A, B, C, D, E) \
  ((__m128i) __builtin_ia32_vpshld_v8hi_mask ((__v8hi)(__m128i)(C), \
      (__v8hi)(__m128i)(D), (int)(E), (__v8hi)(__m128i)(A), (__mmask8)(B)))
#define _mm_maskz_shldi_epi16(A, B, C, D) \
  ((__m128i) __builtin_ia32_vpshld_v8hi_mask ((__v8hi)(__m128i)(B), \
      (__v8hi)(__m128i)(C), (int)(D), \
      (__v8hi)(__m128i)_mm_setzero_si128 (), (__mmask8)(A)))
#define _mm_shldi_epi32(A, B, C) \
  ((__m128i) __builtin_ia32_vpshld_v4si ((__v4si)(__m128i)(A), \
                                         (__v4si)(__m128i)(B), (int)(C)))
#define _mm_mask_shldi_epi32(A, B, C, D, E) \
  ((__m128i) __builtin_ia32_vpshld_v4si_mask ((__v4si)(__m128i)(C), \
      (__v4si)(__m128i)(D), (int)(E), (__v4si)(__m128i)(A), (__mmask8)(B)))
#define _mm_maskz_shldi_epi32(A, B, C, D) \
  ((__m128i) __builtin_ia32_vpshld_v4si_mask ((__v4si)(__m128i)(B), \
      (__v4si)(__m128i)(C), (int)(D), \
      (__v4si)(__m128i)_mm_setzero_si128 (), (__mmask8)(A)))
#define _mm_shldi_epi64(A, B, C) \
  ((__m128i) __builtin_ia32_vpshld_v2di ((__v2di)(__m128i)(A), \
                                         (__v2di)(__m128i)(B), (int)(C)))
#define _mm_mask_shldi_epi64(A, B, C, D, E) \
  ((__m128i) __builtin_ia32_vpshld_v2di_mask ((__v2di)(__m128i)(C), \
      (__v2di)(__m128i)(D), (int)(E), (__v2di)(__m128i)(A), (__mmask8)(B)))
#define _mm_maskz_shldi_epi64(A, B, C, D) \
  ((__m128i) __builtin_ia32_vpshld_v2di_mask ((__v2di)(__m128i)(B), \
      (__v2di)(__m128i)(C), (int)(D), \
      (__v2di)(__m128i)_mm_setzero_si128 (), (__mmask8)(A)))
#endif

__funline __m256i _mm256_shrdv_epi16(__m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_vpshrdv_v16hi((__v16hi)__A, (__v16hi)__B,
                                               (__v16hi)__C);
}
__funline __m256i _mm256_mask_shrdv_epi16(__m256i __A, __mmask16 __B,
                                          __m256i __C, __m256i __D) {
  return (__m256i)__builtin_ia32_vpshrdv_v16hi_mask(
      (__v16hi)__A, (__v16hi)__C, (__v16hi)__D, (__mmask16)__B);
}
__funline __m256i _mm256_maskz_shrdv_epi16(__mmask16 __A, __m256i __B,
                                           __m256i __C, __m256i __D) {
  return (__m256i)__builtin_ia32_vpshrdv_v16hi_maskz(
      (__v16hi)__B, (__v16hi)__C, (__v16hi)__D, (__mmask16)__A);
}
__funline __m256i _mm256_shrdv_epi32(__m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_vpshrdv_v8si((__v8si)__A, (__v8si)__B,
                                              (__v8si)__C);
}
__funline __m256i _mm256_mask_shrdv_epi32(__m256i __A, __mmask8 __B,
                                          __m256i __C, __m256i __D) {
  return (__m256i)__builtin_ia32_vpshrdv_v8si_mask((__v8si)__A, (__v8si)__C,
                                                   (__v8si)__D, (__mmask8)__B);
}
__funline __m256i _mm256_maskz_shrdv_epi32(__mmask8 __A, __m256i __B,
                                           __m256i __C, __m256i __D) {
  return (__m256i)__builtin_ia32_vpshrdv_v8si_maskz((__v8si)__B, (__v8si)__C,
                                                    (__v8si)__D, (__mmask8)__A);
}
__funline __m256i _mm256_shrdv_epi64(__m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_vpshrdv_v4di((__v4di)__A, (__v4di)__B,
                                              (__v4di)__C);
}
__funline __m256i _mm256_mask_shrdv_epi64(__m256i __A, __mmask8 __B,
                                          __m256i __C, __m256i __D) {
  return (__m256i)__builtin_ia32_vpshrdv_v4di_mask((__v4di)__A, (__v4di)__C,
                                                   (__v4di)__D, (__mmask8)__B);
}
__funline __m256i _mm256_maskz_shrdv_epi64(__mmask8 __A, __m256i __B,
                                           __m256i __C, __m256i __D) {
  return (__m256i)__builtin_ia32_vpshrdv_v4di_maskz((__v4di)__B, (__v4di)__C,
                                                    (__v4di)__D, (__mmask8)__A);
}
__funline __m128i _mm_shrdv_epi16(__m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_vpshrdv_v8hi((__v8hi)__A, (__v8hi)__B,
                                              (__v8hi)__C);
}
__funline __m128i _mm_mask_shrdv_epi16(__m128i __A, __mmask8 __B, __m128i __C,
                                       __m128i __D) {
  return (__m128i)__builtin_ia32_vpshrdv_v8hi_mask((__v8hi)__A, (__v8hi)__C,
                                                   (__v8hi)__D, (__mmask8)__B);
}
__funline __m128i _mm_maskz_shrdv_epi16(__mmask8 __A, __m128i __B, __m128i __C,
                                        __m128i __D) {
  return (__m128i)__builtin_ia32_vpshrdv_v8hi_maskz((__v8hi)__B, (__v8hi)__C,
                                                    (__v8hi)__D, (__mmask8)__A);
}
__funline __m128i _mm_shrdv_epi32(__m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_vpshrdv_v4si((__v4si)__A, (__v4si)__B,
                                              (__v4si)__C);
}
__funline __m128i _mm_mask_shrdv_epi32(__m128i __A, __mmask8 __B, __m128i __C,
                                       __m128i __D) {
  return (__m128i)__builtin_ia32_vpshrdv_v4si_mask((__v4si)__A, (__v4si)__C,
                                                   (__v4si)__D, (__mmask8)__B);
}
__funline __m128i _mm_maskz_shrdv_epi32(__mmask8 __A, __m128i __B, __m128i __C,
                                        __m128i __D) {
  return (__m128i)__builtin_ia32_vpshrdv_v4si_maskz((__v4si)__B, (__v4si)__C,
                                                    (__v4si)__D, (__mmask8)__A);
}
__funline __m128i _mm_shrdv_epi64(__m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_vpshrdv_v2di((__v2di)__A, (__v2di)__B,
                                              (__v2di)__C);
}
__funline __m128i _mm_mask_shrdv_epi64(__m128i __A, __mmask8 __B, __m128i __C,
                                       __m128i __D) {
  return (__m128i)__builtin_ia32_vpshrdv_v2di_mask((__v2di)__A, (__v2di)__C,
                                                   (__v2di)__D, (__mmask8)__B);
}
__funline __m128i _mm_maskz_shrdv_epi64(__mmask8 __A, __m128i __B, __m128i __C,
                                        __m128i __D) {
  return (__m128i)__builtin_ia32_vpshrdv_v2di_maskz((__v2di)__B, (__v2di)__C,
                                                    (__v2di)__D, (__mmask8)__A);
}
__funline __m256i _mm256_shldv_epi16(__m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_vpshldv_v16hi((__v16hi)__A, (__v16hi)__B,
                                               (__v16hi)__C);
}
__funline __m256i _mm256_mask_shldv_epi16(__m256i __A, __mmask16 __B,
                                          __m256i __C, __m256i __D) {
  return (__m256i)__builtin_ia32_vpshldv_v16hi_mask(
      (__v16hi)__A, (__v16hi)__C, (__v16hi)__D, (__mmask16)__B);
}
__funline __m256i _mm256_maskz_shldv_epi16(__mmask16 __A, __m256i __B,
                                           __m256i __C, __m256i __D) {
  return (__m256i)__builtin_ia32_vpshldv_v16hi_maskz(
      (__v16hi)__B, (__v16hi)__C, (__v16hi)__D, (__mmask16)__A);
}
__funline __m256i _mm256_shldv_epi32(__m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_vpshldv_v8si((__v8si)__A, (__v8si)__B,
                                              (__v8si)__C);
}
__funline __m256i _mm256_mask_shldv_epi32(__m256i __A, __mmask8 __B,
                                          __m256i __C, __m256i __D) {
  return (__m256i)__builtin_ia32_vpshldv_v8si_mask((__v8si)__A, (__v8si)__C,
                                                   (__v8si)__D, (__mmask8)__B);
}
__funline __m256i _mm256_maskz_shldv_epi32(__mmask8 __A, __m256i __B,
                                           __m256i __C, __m256i __D) {
  return (__m256i)__builtin_ia32_vpshldv_v8si_maskz((__v8si)__B, (__v8si)__C,
                                                    (__v8si)__D, (__mmask8)__A);
}
__funline __m256i _mm256_shldv_epi64(__m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_vpshldv_v4di((__v4di)__A, (__v4di)__B,
                                              (__v4di)__C);
}
__funline __m256i _mm256_mask_shldv_epi64(__m256i __A, __mmask8 __B,
                                          __m256i __C, __m256i __D) {
  return (__m256i)__builtin_ia32_vpshldv_v4di_mask((__v4di)__A, (__v4di)__C,
                                                   (__v4di)__D, (__mmask8)__B);
}
__funline __m256i _mm256_maskz_shldv_epi64(__mmask8 __A, __m256i __B,
                                           __m256i __C, __m256i __D) {
  return (__m256i)__builtin_ia32_vpshldv_v4di_maskz((__v4di)__B, (__v4di)__C,
                                                    (__v4di)__D, (__mmask8)__A);
}
__funline __m128i _mm_shldv_epi16(__m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_vpshldv_v8hi((__v8hi)__A, (__v8hi)__B,
                                              (__v8hi)__C);
}
__funline __m128i _mm_mask_shldv_epi16(__m128i __A, __mmask8 __B, __m128i __C,
                                       __m128i __D) {
  return (__m128i)__builtin_ia32_vpshldv_v8hi_mask((__v8hi)__A, (__v8hi)__C,
                                                   (__v8hi)__D, (__mmask8)__B);
}
__funline __m128i _mm_maskz_shldv_epi16(__mmask8 __A, __m128i __B, __m128i __C,
                                        __m128i __D) {
  return (__m128i)__builtin_ia32_vpshldv_v8hi_maskz((__v8hi)__B, (__v8hi)__C,
                                                    (__v8hi)__D, (__mmask8)__A);
}
__funline __m128i _mm_shldv_epi32(__m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_vpshldv_v4si((__v4si)__A, (__v4si)__B,
                                              (__v4si)__C);
}
__funline __m128i _mm_mask_shldv_epi32(__m128i __A, __mmask8 __B, __m128i __C,
                                       __m128i __D) {
  return (__m128i)__builtin_ia32_vpshldv_v4si_mask((__v4si)__A, (__v4si)__C,
                                                   (__v4si)__D, (__mmask8)__B);
}
__funline __m128i _mm_maskz_shldv_epi32(__mmask8 __A, __m128i __B, __m128i __C,
                                        __m128i __D) {
  return (__m128i)__builtin_ia32_vpshldv_v4si_maskz((__v4si)__B, (__v4si)__C,
                                                    (__v4si)__D, (__mmask8)__A);
}
__funline __m128i _mm_shldv_epi64(__m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_vpshldv_v2di((__v2di)__A, (__v2di)__B,
                                              (__v2di)__C);
}
__funline __m128i _mm_mask_shldv_epi64(__m128i __A, __mmask8 __B, __m128i __C,
                                       __m128i __D) {
  return (__m128i)__builtin_ia32_vpshldv_v2di_mask((__v2di)__A, (__v2di)__C,
                                                   (__v2di)__D, (__mmask8)__B);
}
__funline __m128i _mm_maskz_shldv_epi64(__mmask8 __A, __m128i __B, __m128i __C,
                                        __m128i __D) {
  return (__m128i)__builtin_ia32_vpshldv_v2di_maskz((__v2di)__B, (__v2di)__C,
                                                    (__v2di)__D, (__mmask8)__A);
}

#ifdef __DISABLE_AVX512VBMI2VL__
#undef __DISABLE_AVX512VBMI2VL__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512VBMIVL__ */

#if !defined(__AVX512VL__) || !defined(__AVX512VBMI2__) || \
    !defined(__AVX512BW__)
#pragma GCC push_options
#pragma GCC target("avx512vbmi2,avx512vl,avx512bw")
#define __DISABLE_AVX512VBMI2VLBW__
#endif /* __AVX512VBMIVLBW__ */

__funline __m256i _mm256_mask_compress_epi8(__m256i __A, __mmask32 __B,
                                            __m256i __C) {
  return (__m256i)__builtin_ia32_compressqi256_mask((__v32qi)__C, (__v32qi)__A,
                                                    (__mmask32)__B);
}
__funline __m256i _mm256_maskz_compress_epi8(__mmask32 __A, __m256i __B) {
  return (__m256i)__builtin_ia32_compressqi256_mask(
      (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__A);
}
__funline void _mm256_mask_compressstoreu_epi8(void *__A, __mmask32 __B,
                                               __m256i __C) {
  __builtin_ia32_compressstoreuqi256_mask((__v32qi *)__A, (__v32qi)__C,
                                          (__mmask32)__B);
}
__funline __m256i _mm256_mask_expand_epi8(__m256i __A, __mmask32 __B,
                                          __m256i __C) {
  return (__m256i)__builtin_ia32_expandqi256_mask((__v32qi)__C, (__v32qi)__A,
                                                  (__mmask32)__B);
}
__funline __m256i _mm256_maskz_expand_epi8(__mmask32 __A, __m256i __B) {
  return (__m256i)__builtin_ia32_expandqi256_maskz(
      (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__A);
}
__funline __m256i _mm256_mask_expandloadu_epi8(__m256i __A, __mmask32 __B,
                                               const void *__C) {
  return (__m256i)__builtin_ia32_expandloadqi256_mask(
      (const __v32qi *)__C, (__v32qi)__A, (__mmask32)__B);
}
__funline __m256i _mm256_maskz_expandloadu_epi8(__mmask32 __A,
                                                const void *__B) {
  return (__m256i)__builtin_ia32_expandloadqi256_maskz(
      (const __v32qi *)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__A);
}

#ifdef __DISABLE_AVX512VBMI2VLBW__
#undef __DISABLE_AVX512VBMI2VLBW__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512VBMIVLBW__ */

#endif /* _AVX512VBMIVLINTRIN_H_INCLUDED */
33,620
717
jart/cosmopolitan
false
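Usage sketch (not part of the header above, assumes a translation unit built with avx512vbmi2 and avx512vl enabled): VPSHLD concatenates each lane pair and keeps the high half, i.e. a per-lane funnel shift. The helper name is illustrative.

/* Each 64-bit lane becomes the top 64 bits of (hi:lo) << 3. */
static __m128i funnel_left3(__m128i hi, __m128i lo) {
  return _mm_shldi_epi64(hi, lo, 3);
}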
cosmopolitan/third_party/intel/avx5124fmapsintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED
#error \
    "Never use <avx5124fmapsintrin.h> directly; include <x86intrin.h> instead."
#endif

#ifndef _AVX5124FMAPSINTRIN_H_INCLUDED
#define _AVX5124FMAPSINTRIN_H_INCLUDED

#ifndef __AVX5124FMAPS__
#pragma GCC push_options
#pragma GCC target("avx5124fmaps")
#define __DISABLE_AVX5124FMAPS__
#endif /* __AVX5124FMAPS__ */

__funline __m512 _mm512_4fmadd_ps(__m512 __A, __m512 __B, __m512 __C,
                                  __m512 __D, __m512 __E, __m128 *__F) {
  return (__m512)__builtin_ia32_4fmaddps((__v16sf)__B, (__v16sf)__C,
                                         (__v16sf)__D, (__v16sf)__E,
                                         (__v16sf)__A, (const __v4sf *)__F);
}
__funline __m512 _mm512_mask_4fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B,
                                       __m512 __C, __m512 __D, __m512 __E,
                                       __m128 *__F) {
  return (__m512)__builtin_ia32_4fmaddps_mask(
      (__v16sf)__B, (__v16sf)__C, (__v16sf)__D, (__v16sf)__E, (__v16sf)__A,
      (const __v4sf *)__F, (__v16sf)__A, (__mmask16)__U);
}
__funline __m512 _mm512_maskz_4fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B,
                                        __m512 __C, __m512 __D, __m512 __E,
                                        __m128 *__F) {
  return (__m512)__builtin_ia32_4fmaddps_mask(
      (__v16sf)__B, (__v16sf)__C, (__v16sf)__D, (__v16sf)__E, (__v16sf)__A,
      (const __v4sf *)__F, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U);
}
__funline __m128 _mm_4fmadd_ss(__m128 __A, __m128 __B, __m128 __C, __m128 __D,
                               __m128 __E, __m128 *__F) {
  return (__m128)__builtin_ia32_4fmaddss((__v4sf)__B, (__v4sf)__C, (__v4sf)__D,
                                         (__v4sf)__E, (__v4sf)__A,
                                         (const __v4sf *)__F);
}
__funline __m128 _mm_mask_4fmadd_ss(__m128 __A, __mmask8 __U, __m128 __B,
                                    __m128 __C, __m128 __D, __m128 __E,
                                    __m128 *__F) {
  return (__m128)__builtin_ia32_4fmaddss_mask(
      (__v4sf)__B, (__v4sf)__C, (__v4sf)__D, (__v4sf)__E, (__v4sf)__A,
      (const __v4sf *)__F, (__v4sf)__A, (__mmask8)__U);
}
__funline __m128 _mm_maskz_4fmadd_ss(__mmask8 __U, __m128 __A, __m128 __B,
                                     __m128 __C, __m128 __D, __m128 __E,
                                     __m128 *__F) {
  return (__m128)__builtin_ia32_4fmaddss_mask(
      (__v4sf)__B, (__v4sf)__C, (__v4sf)__D, (__v4sf)__E, (__v4sf)__A,
      (const __v4sf *)__F, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
}
__funline __m512 _mm512_4fnmadd_ps(__m512 __A, __m512 __B, __m512 __C,
                                   __m512 __D, __m512 __E, __m128 *__F) {
  return (__m512)__builtin_ia32_4fnmaddps((__v16sf)__B, (__v16sf)__C,
                                          (__v16sf)__D, (__v16sf)__E,
                                          (__v16sf)__A, (const __v4sf *)__F);
}
__funline __m512 _mm512_mask_4fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B,
                                        __m512 __C, __m512 __D, __m512 __E,
                                        __m128 *__F) {
  return (__m512)__builtin_ia32_4fnmaddps_mask(
      (__v16sf)__B, (__v16sf)__C, (__v16sf)__D, (__v16sf)__E, (__v16sf)__A,
      (const __v4sf *)__F, (__v16sf)__A, (__mmask16)__U);
}
__funline __m512 _mm512_maskz_4fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B,
                                         __m512 __C, __m512 __D, __m512 __E,
                                         __m128 *__F) {
  return (__m512)__builtin_ia32_4fnmaddps_mask(
      (__v16sf)__B, (__v16sf)__C, (__v16sf)__D, (__v16sf)__E, (__v16sf)__A,
      (const __v4sf *)__F, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U);
}
__funline __m128 _mm_4fnmadd_ss(__m128 __A, __m128 __B, __m128 __C, __m128 __D,
                                __m128 __E, __m128 *__F) {
  return (__m128)__builtin_ia32_4fnmaddss((__v4sf)__B, (__v4sf)__C,
                                          (__v4sf)__D, (__v4sf)__E,
                                          (__v4sf)__A, (const __v4sf *)__F);
}
__funline __m128 _mm_mask_4fnmadd_ss(__m128 __A, __mmask8 __U, __m128 __B,
                                     __m128 __C, __m128 __D, __m128 __E,
                                     __m128 *__F) {
  return (__m128)__builtin_ia32_4fnmaddss_mask(
      (__v4sf)__B, (__v4sf)__C, (__v4sf)__D, (__v4sf)__E, (__v4sf)__A,
      (const __v4sf *)__F, (__v4sf)__A, (__mmask8)__U);
}
__funline __m128 _mm_maskz_4fnmadd_ss(__mmask8 __U, __m128 __A, __m128 __B,
                                      __m128 __C, __m128 __D, __m128 __E,
                                      __m128 *__F) {
  return (__m128)__builtin_ia32_4fnmaddss_mask(
      (__v4sf)__B, (__v4sf)__C, (__v4sf)__D, (__v4sf)__E, (__v4sf)__A,
      (const __v4sf *)__F, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
}

#ifdef __DISABLE_AVX5124FMAPS__
#undef __DISABLE_AVX5124FMAPS__
#pragma GCC pop_options
#endif /* __DISABLE_AVX5124FMAPS__ */

#endif /* _AVX5124FMAPSINTRIN_H_INCLUDED */
5,069
113
jart/cosmopolitan
false
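Usage sketch (not part of the header above; AVX512_4FMAPS shipped only on Knights Mill): the packed 4-iteration form folds four dependent FMAs into one call, with the four scalars loaded from the 128-bit memory operand. Names below are illustrative.

/* Roughly acc + r0*f[0] + r1*f[1] + r2*f[2] + r3*f[3] per 32-bit lane,
   following the operand order of the wrapper above. */
static __m512 fma4_step(__m512 acc, __m512 r0, __m512 r1, __m512 r2,
                        __m512 r3, __m128 *f) {
  return _mm512_4fmadd_ps(acc, r0, r1, r2, r3, f);
}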
cosmopolitan/third_party/intel/avx512bitalgintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED
#error \
    "Never use <avx512bitalgintrin.h> directly; include <x86intrin.h> instead."
#endif

#ifndef _AVX512BITALGINTRIN_H_INCLUDED
#define _AVX512BITALGINTRIN_H_INCLUDED

#ifndef __AVX512BITALG__
#pragma GCC push_options
#pragma GCC target("avx512bitalg")
#define __DISABLE_AVX512BITALG__
#endif /* __AVX512BITALG__ */

__funline __m512i _mm512_popcnt_epi8(__m512i __A) {
  return (__m512i)__builtin_ia32_vpopcountb_v64qi((__v64qi)__A);
}
__funline __m512i _mm512_popcnt_epi16(__m512i __A) {
  return (__m512i)__builtin_ia32_vpopcountw_v32hi((__v32hi)__A);
}

#ifdef __DISABLE_AVX512BITALG__
#undef __DISABLE_AVX512BITALG__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512BITALG__ */

#if !defined(__AVX512BITALG__) || !defined(__AVX512BW__)
#pragma GCC push_options
#pragma GCC target("avx512bitalg,avx512bw")
#define __DISABLE_AVX512BITALGBW__
#endif /* __AVX512VLBW__ */

__funline __m512i _mm512_mask_popcnt_epi8(__m512i __A, __mmask64 __U,
                                          __m512i __B) {
  return (__m512i)__builtin_ia32_vpopcountb_v64qi_mask(
      (__v64qi)__A, (__v64qi)__B, (__mmask64)__U);
}
__funline __m512i _mm512_maskz_popcnt_epi8(__mmask64 __U, __m512i __A) {
  return (__m512i)__builtin_ia32_vpopcountb_v64qi_mask(
      (__v64qi)__A, (__v64qi)_mm512_setzero_si512(), (__mmask64)__U);
}
__funline __m512i _mm512_mask_popcnt_epi16(__m512i __A, __mmask32 __U,
                                           __m512i __B) {
  return (__m512i)__builtin_ia32_vpopcountw_v32hi_mask(
      (__v32hi)__A, (__v32hi)__B, (__mmask32)__U);
}
__funline __m512i _mm512_maskz_popcnt_epi16(__mmask32 __U, __m512i __A) {
  return (__m512i)__builtin_ia32_vpopcountw_v32hi_mask(
      (__v32hi)__A, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U);
}
__funline __mmask64 _mm512_bitshuffle_epi64_mask(__m512i __A, __m512i __B) {
  return (__mmask64)__builtin_ia32_vpshufbitqmb512_mask(
      (__v64qi)__A, (__v64qi)__B, (__mmask64)-1);
}
__funline __mmask64 _mm512_mask_bitshuffle_epi64_mask(__mmask64 __M,
                                                      __m512i __A,
                                                      __m512i __B) {
  return (__mmask64)__builtin_ia32_vpshufbitqmb512_mask(
      (__v64qi)__A, (__v64qi)__B, (__mmask64)__M);
}

#ifdef __DISABLE_AVX512BITALGBW__
#undef __DISABLE_AVX512BITALGBW__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512BITALGBW__ */

#if !defined(__AVX512BITALG__) || !defined(__AVX512VL__) || \
    !defined(__AVX512BW__)
#pragma GCC push_options
#pragma GCC target("avx512bitalg,avx512vl,avx512bw")
#define __DISABLE_AVX512BITALGVLBW__
#endif /* __AVX512VLBW__ */

__funline __m256i _mm256_mask_popcnt_epi8(__m256i __A, __mmask32 __U,
                                          __m256i __B) {
  return (__m256i)__builtin_ia32_vpopcountb_v32qi_mask(
      (__v32qi)__A, (__v32qi)__B, (__mmask32)__U);
}
__funline __m256i _mm256_maskz_popcnt_epi8(__mmask32 __U, __m256i __A) {
  return (__m256i)__builtin_ia32_vpopcountb_v32qi_mask(
      (__v32qi)__A, (__v32qi)_mm256_setzero_si256(), (__mmask32)__U);
}
__funline __mmask32 _mm256_bitshuffle_epi64_mask(__m256i __A, __m256i __B) {
  return (__mmask32)__builtin_ia32_vpshufbitqmb256_mask(
      (__v32qi)__A, (__v32qi)__B, (__mmask32)-1);
}
__funline __mmask32 _mm256_mask_bitshuffle_epi64_mask(__mmask32 __M,
                                                      __m256i __A,
                                                      __m256i __B) {
  return (__mmask32)__builtin_ia32_vpshufbitqmb256_mask(
      (__v32qi)__A, (__v32qi)__B, (__mmask32)__M);
}

#ifdef __DISABLE_AVX512BITALGVLBW__
#undef __DISABLE_AVX512BITALGVLBW__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512BITALGVLBW__ */

#if !defined(__AVX512BITALG__) || !defined(__AVX512VL__)
#pragma GCC push_options
#pragma GCC target("avx512bitalg,avx512vl")
#define __DISABLE_AVX512BITALGVL__
#endif /* __AVX512VLBW__ */

__funline __mmask16 _mm_bitshuffle_epi64_mask(__m128i __A, __m128i __B) {
  return (__mmask16)__builtin_ia32_vpshufbitqmb128_mask(
      (__v16qi)__A, (__v16qi)__B, (__mmask16)-1);
}
__funline __mmask16 _mm_mask_bitshuffle_epi64_mask(__mmask16 __M, __m128i __A,
                                                   __m128i __B) {
  return (__mmask16)__builtin_ia32_vpshufbitqmb128_mask(
      (__v16qi)__A, (__v16qi)__B, (__mmask16)__M);
}
__funline __m256i _mm256_popcnt_epi8(__m256i __A) {
  return (__m256i)__builtin_ia32_vpopcountb_v32qi((__v32qi)__A);
}
__funline __m256i _mm256_popcnt_epi16(__m256i __A) {
  return (__m256i)__builtin_ia32_vpopcountw_v16hi((__v16hi)__A);
}
__funline __m128i _mm_popcnt_epi8(__m128i __A) {
  return (__m128i)__builtin_ia32_vpopcountb_v16qi((__v16qi)__A);
}
__funline __m128i _mm_popcnt_epi16(__m128i __A) {
  return (__m128i)__builtin_ia32_vpopcountw_v8hi((__v8hi)__A);
}
__funline __m256i _mm256_mask_popcnt_epi16(__m256i __A, __mmask16 __U,
                                           __m256i __B) {
  return (__m256i)__builtin_ia32_vpopcountw_v16hi_mask(
      (__v16hi)__A, (__v16hi)__B, (__mmask16)__U);
}
__funline __m256i _mm256_maskz_popcnt_epi16(__mmask16 __U, __m256i __A) {
  return (__m256i)__builtin_ia32_vpopcountw_v16hi_mask(
      (__v16hi)__A, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U);
}
__funline __m128i _mm_mask_popcnt_epi8(__m128i __A, __mmask16 __U, __m128i __B) {
  return (__m128i)__builtin_ia32_vpopcountb_v16qi_mask(
      (__v16qi)__A, (__v16qi)__B, (__mmask16)__U);
}
__funline __m128i _mm_maskz_popcnt_epi8(__mmask16 __U, __m128i __A) {
  return (__m128i)__builtin_ia32_vpopcountb_v16qi_mask(
      (__v16qi)__A, (__v16qi)_mm_setzero_si128(), (__mmask16)__U);
}
__funline __m128i _mm_mask_popcnt_epi16(__m128i __A, __mmask8 __U, __m128i __B) {
  return (__m128i)__builtin_ia32_vpopcountw_v8hi_mask((__v8hi)__A, (__v8hi)__B,
                                                      (__mmask8)__U);
}
__funline __m128i _mm_maskz_popcnt_epi16(__mmask8 __U, __m128i __A) {
  return (__m128i)__builtin_ia32_vpopcountw_v8hi_mask(
      (__v8hi)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)__U);
}

#ifdef __DISABLE_AVX512BITALGVL__
#undef __DISABLE_AVX512BITALGVL__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512BITALGBW__ */

#endif /* _AVX512BITALGINTRIN_H_INCLUDED */
6,162
173
jart/cosmopolitan
false
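Usage sketch (not part of the header above, assumes avx512bitalg and avx512vl enabled): per-byte population count, with the maskz form zeroing unselected lanes. The helper name is illustrative.

/* Popcount each byte of v, keeping only the lanes selected by m. */
static __m128i masked_bytewise_popcount(__mmask16 m, __m128i v) {
  return _mm_maskz_popcnt_epi8(m, v);
}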
cosmopolitan/third_party/intel/avx2intrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED
#error "Never use <avx2intrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef _AVX2INTRIN_H_INCLUDED
#define _AVX2INTRIN_H_INCLUDED

#ifndef __AVX2__
#pragma GCC push_options
#pragma GCC target("avx2")
#define __DISABLE_AVX2__
#endif /* __AVX2__ */

#ifdef __OPTIMIZE__
__funline __m256i _mm256_mpsadbw_epu8(__m256i __X, __m256i __Y, const int __M) {
  return (__m256i)__builtin_ia32_mpsadbw256((__v32qi)__X, (__v32qi)__Y, __M);
}
#else
#define _mm256_mpsadbw_epu8(X, Y, M)                         \
  ((__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \
                                      (__v32qi)(__m256i)(Y), (int)(M)))
#endif
__funline __m256i _mm256_abs_epi8(__m256i __A) {
  return (__m256i)__builtin_ia32_pabsb256((__v32qi)__A);
}
__funline __m256i _mm256_abs_epi16(__m256i __A) {
  return (__m256i)__builtin_ia32_pabsw256((__v16hi)__A);
}
__funline __m256i _mm256_abs_epi32(__m256i __A) {
  return (__m256i)__builtin_ia32_pabsd256((__v8si)__A);
}
__funline __m256i _mm256_packs_epi32(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_packssdw256((__v8si)__A, (__v8si)__B);
}
__funline __m256i _mm256_packs_epi16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_packsswb256((__v16hi)__A, (__v16hi)__B);
}
__funline __m256i _mm256_packus_epi32(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_packusdw256((__v8si)__A, (__v8si)__B);
}
__funline __m256i _mm256_packus_epi16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_packuswb256((__v16hi)__A, (__v16hi)__B);
}
__funline __m256i _mm256_add_epi8(__m256i __A, __m256i __B) {
  return (__m256i)((__v32qu)__A + (__v32qu)__B);
}
__funline __m256i _mm256_add_epi16(__m256i __A, __m256i __B) {
  return (__m256i)((__v16hu)__A + (__v16hu)__B);
}
__funline __m256i _mm256_add_epi32(__m256i __A, __m256i __B) {
  return (__m256i)((__v8su)__A + (__v8su)__B);
}
__funline __m256i _mm256_add_epi64(__m256i __A, __m256i __B) {
  return (__m256i)((__v4du)__A + (__v4du)__B);
}
__funline __m256i _mm256_adds_epi8(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_paddsb256((__v32qi)__A, (__v32qi)__B);
}
__funline __m256i _mm256_adds_epi16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_paddsw256((__v16hi)__A, (__v16hi)__B);
}
__funline __m256i _mm256_adds_epu8(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_paddusb256((__v32qi)__A, (__v32qi)__B);
}
__funline __m256i _mm256_adds_epu16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_paddusw256((__v16hi)__A, (__v16hi)__B);
}
#ifdef __OPTIMIZE__
__funline __m256i _mm256_alignr_epi8(__m256i __A, __m256i __B, const int __N) {
  return (__m256i)__builtin_ia32_palignr256((__v4di)__A, (__v4di)__B, __N * 8);
}
#else
#define _mm256_alignr_epi8(A, B, N)                         \
  ((__m256i)__builtin_ia32_palignr256((__v4di)(__m256i)(A), \
                                      (__v4di)(__m256i)(B), (int)(N)*8))
#endif
__funline __m256i _mm256_and_si256(__m256i __A, __m256i __B) {
  return (__m256i)((__v4du)__A & (__v4du)__B);
}
__funline __m256i _mm256_andnot_si256(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_andnotsi256((__v4di)__A, (__v4di)__B);
}
__funline __m256i _mm256_avg_epu8(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pavgb256((__v32qi)__A, (__v32qi)__B);
}
__funline __m256i _mm256_avg_epu16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pavgw256((__v16hi)__A, (__v16hi)__B);
}
__funline __m256i _mm256_blendv_epi8(__m256i __X, __m256i __Y, __m256i __M) {
  return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__X, (__v32qi)__Y,
                                             (__v32qi)__M);
}
#ifdef __OPTIMIZE__
__funline __m256i _mm256_blend_epi16(__m256i __X, __m256i __Y, const int __M) {
  return (__m256i)__builtin_ia32_pblendw256((__v16hi)__X, (__v16hi)__Y, __M);
}
#else
#define _mm256_blend_epi16(X, Y, M)                          \
  ((__m256i)__builtin_ia32_pblendw256((__v16hi)(__m256i)(X), \
                                      (__v16hi)(__m256i)(Y), (int)(M)))
#endif
__funline __m256i _mm256_cmpeq_epi8(__m256i __A, __m256i __B) {
  return (__m256i)((__v32qi)__A == (__v32qi)__B);
}
__funline __m256i _mm256_cmpeq_epi16(__m256i __A, __m256i __B) {
  return (__m256i)((__v16hi)__A == (__v16hi)__B);
}
__funline __m256i _mm256_cmpeq_epi32(__m256i __A, __m256i __B) {
  return (__m256i)((__v8si)__A == (__v8si)__B);
}
__funline __m256i _mm256_cmpeq_epi64(__m256i __A, __m256i __B) {
  return (__m256i)((__v4di)__A == (__v4di)__B);
}
__funline __m256i _mm256_cmpgt_epi8(__m256i __A, __m256i __B) {
  return (__m256i)((__v32qi)__A > (__v32qi)__B);
}
__funline __m256i _mm256_cmpgt_epi16(__m256i __A, __m256i __B) {
  return (__m256i)((__v16hi)__A > (__v16hi)__B);
}
__funline __m256i _mm256_cmpgt_epi32(__m256i __A, __m256i __B) {
  return (__m256i)((__v8si)__A > (__v8si)__B);
}
__funline __m256i _mm256_cmpgt_epi64(__m256i __A, __m256i __B) {
  return (__m256i)((__v4di)__A > (__v4di)__B);
}
__funline __m256i _mm256_hadd_epi16(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_phaddw256((__v16hi)__X, (__v16hi)__Y);
}
__funline __m256i _mm256_hadd_epi32(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_phaddd256((__v8si)__X, (__v8si)__Y);
}
__funline __m256i _mm256_hadds_epi16(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__X, (__v16hi)__Y);
}
__funline __m256i _mm256_hsub_epi16(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_phsubw256((__v16hi)__X, (__v16hi)__Y);
}
__funline __m256i _mm256_hsub_epi32(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_phsubd256((__v8si)__X, (__v8si)__Y);
}
__funline __m256i _mm256_hsubs_epi16(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__X, (__v16hi)__Y);
}
__funline __m256i _mm256_maddubs_epi16(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__X, (__v32qi)__Y);
}
__funline __m256i _mm256_madd_epi16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__A, (__v16hi)__B);
}
__funline __m256i _mm256_max_epi8(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)__A, (__v32qi)__B);
}
__funline __m256i _mm256_max_epi16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)__A, (__v16hi)__B);
}
__funline __m256i _mm256_max_epi32(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__A, (__v8si)__B);
}
__funline __m256i _mm256_max_epu8(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmaxub256((__v32qi)__A, (__v32qi)__B);
}
__funline __m256i _mm256_max_epu16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)__A, (__v16hi)__B);
}
__funline __m256i _mm256_max_epu32(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmaxud256((__v8si)__A, (__v8si)__B);
}
__funline __m256i _mm256_min_epi8(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pminsb256((__v32qi)__A, (__v32qi)__B);
}
__funline __m256i _mm256_min_epi16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pminsw256((__v16hi)__A, (__v16hi)__B);
}
__funline __m256i _mm256_min_epi32(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pminsd256((__v8si)__A, (__v8si)__B);
}
__funline __m256i _mm256_min_epu8(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pminub256((__v32qi)__A, (__v32qi)__B);
}
__funline __m256i _mm256_min_epu16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pminuw256((__v16hi)__A, (__v16hi)__B);
}
__funline __m256i _mm256_min_epu32(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pminud256((__v8si)__A, (__v8si)__B);
}
__funline int _mm256_movemask_epi8(__m256i __A) {
  return __builtin_ia32_pmovmskb256((__v32qi)__A);
}
__funline __m256i _mm256_cvtepi8_epi16(__m128i __X) {
  return (__m256i)__builtin_ia32_pmovsxbw256((__v16qi)__X);
}
__funline __m256i _mm256_cvtepi8_epi32(__m128i __X) {
  return (__m256i)__builtin_ia32_pmovsxbd256((__v16qi)__X);
}
__funline __m256i _mm256_cvtepi8_epi64(__m128i __X) {
  return (__m256i)__builtin_ia32_pmovsxbq256((__v16qi)__X);
}
__funline __m256i _mm256_cvtepi16_epi32(__m128i __X) {
  return (__m256i)__builtin_ia32_pmovsxwd256((__v8hi)__X);
}
__funline __m256i _mm256_cvtepi16_epi64(__m128i __X) {
  return (__m256i)__builtin_ia32_pmovsxwq256((__v8hi)__X);
}
__funline __m256i _mm256_cvtepi32_epi64(__m128i __X) {
  return (__m256i)__builtin_ia32_pmovsxdq256((__v4si)__X);
}
__funline __m256i _mm256_cvtepu8_epi16(__m128i __X) {
  return (__m256i)__builtin_ia32_pmovzxbw256((__v16qi)__X);
}
__funline __m256i _mm256_cvtepu8_epi32(__m128i __X) {
  return (__m256i)__builtin_ia32_pmovzxbd256((__v16qi)__X);
}
__funline __m256i _mm256_cvtepu8_epi64(__m128i __X) {
  return (__m256i)__builtin_ia32_pmovzxbq256((__v16qi)__X);
}
__funline __m256i _mm256_cvtepu16_epi32(__m128i __X) {
  return (__m256i)__builtin_ia32_pmovzxwd256((__v8hi)__X);
}
__funline __m256i _mm256_cvtepu16_epi64(__m128i __X) {
  return (__m256i)__builtin_ia32_pmovzxwq256((__v8hi)__X);
}
__funline __m256i _mm256_cvtepu32_epi64(__m128i __X) {
  return (__m256i)__builtin_ia32_pmovzxdq256((__v4si)__X);
}
__funline __m256i _mm256_mul_epi32(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_pmuldq256((__v8si)__X, (__v8si)__Y);
}
__funline __m256i _mm256_mulhrs_epi16(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__X, (__v16hi)__Y);
}
__funline __m256i _mm256_mulhi_epu16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__A, (__v16hi)__B);
}
__funline __m256i _mm256_mulhi_epi16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__A, (__v16hi)__B);
}
__funline __m256i _mm256_mullo_epi16(__m256i __A, __m256i __B) {
  return (__m256i)((__v16hu)__A * (__v16hu)__B);
}
__funline __m256i _mm256_mullo_epi32(__m256i __A, __m256i __B) {
  return (__m256i)((__v8su)__A * (__v8su)__B);
}
__funline __m256i _mm256_mul_epu32(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmuludq256((__v8si)__A, (__v8si)__B);
}
__funline __m256i _mm256_or_si256(__m256i __A, __m256i __B) {
  return (__m256i)((__v4du)__A | (__v4du)__B);
}
__funline __m256i _mm256_sad_epu8(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_psadbw256((__v32qi)__A, (__v32qi)__B);
}
__funline __m256i _mm256_shuffle_epi8(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_pshufb256((__v32qi)__X, (__v32qi)__Y);
}
#ifdef __OPTIMIZE__
__funline __m256i _mm256_shuffle_epi32(__m256i __A, const int __mask) {
  return (__m256i)__builtin_ia32_pshufd256((__v8si)__A, __mask);
}
__funline __m256i _mm256_shufflehi_epi16(__m256i __A, const int __mask) {
  return (__m256i)__builtin_ia32_pshufhw256((__v16hi)__A, __mask);
}
__funline __m256i _mm256_shufflelo_epi16(__m256i __A, const int __mask) {
  return (__m256i)__builtin_ia32_pshuflw256((__v16hi)__A, __mask);
}
#else
#define _mm256_shuffle_epi32(A, N) \
  ((__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(A), (int)(N)))
#define _mm256_shufflehi_epi16(A, N) \
  ((__m256i)__builtin_ia32_pshufhw256((__v16hi)(__m256i)(A), (int)(N)))
#define _mm256_shufflelo_epi16(A, N) \
  ((__m256i)__builtin_ia32_pshuflw256((__v16hi)(__m256i)(A), (int)(N)))
#endif
__funline __m256i _mm256_sign_epi8(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_psignb256((__v32qi)__X, (__v32qi)__Y);
}
__funline __m256i _mm256_sign_epi16(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_psignw256((__v16hi)__X, (__v16hi)__Y);
}
__funline __m256i _mm256_sign_epi32(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_psignd256((__v8si)__X, (__v8si)__Y);
}
#ifdef __OPTIMIZE__
__funline __m256i _mm256_bslli_epi128(__m256i __A, const int __N) {
  return (__m256i)__builtin_ia32_pslldqi256(__A, __N * 8);
}
__funline __m256i _mm256_slli_si256(__m256i __A, const int __N) {
  return (__m256i)__builtin_ia32_pslldqi256(__A, __N * 8);
}
#else
#define _mm256_bslli_epi128(A, N) \
  ((__m256i)__builtin_ia32_pslldqi256((__m256i)(A), (int)(N)*8))
#define _mm256_slli_si256(A, N) \
  ((__m256i)__builtin_ia32_pslldqi256((__m256i)(A), (int)(N)*8))
#endif
__funline __m256i _mm256_slli_epi16(__m256i __A, int __B) {
  return (__m256i)__builtin_ia32_psllwi256((__v16hi)__A, __B);
}
__funline __m256i _mm256_sll_epi16(__m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_psllw256((__v16hi)__A, (__v8hi)__B);
}
__funline __m256i _mm256_slli_epi32(__m256i __A, int __B) {
  return (__m256i)__builtin_ia32_pslldi256((__v8si)__A, __B);
}
__funline __m256i _mm256_sll_epi32(__m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_pslld256((__v8si)__A, (__v4si)__B);
}
__funline __m256i _mm256_slli_epi64(__m256i __A, int __B) {
  return (__m256i)__builtin_ia32_psllqi256((__v4di)__A, __B);
}
__funline __m256i _mm256_sll_epi64(__m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_psllq256((__v4di)__A, (__v2di)__B);
}
__funline __m256i _mm256_srai_epi16(__m256i __A, int __B) {
  return (__m256i)__builtin_ia32_psrawi256((__v16hi)__A, __B);
}
__funline __m256i _mm256_sra_epi16(__m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_psraw256((__v16hi)__A, (__v8hi)__B);
}
__funline __m256i _mm256_srai_epi32(__m256i __A, int __B) {
  return (__m256i)__builtin_ia32_psradi256((__v8si)__A, __B);
}
__funline __m256i _mm256_sra_epi32(__m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_psrad256((__v8si)__A, (__v4si)__B);
}
#ifdef __OPTIMIZE__
__funline __m256i _mm256_bsrli_epi128(__m256i __A, const int __N) {
  return (__m256i)__builtin_ia32_psrldqi256(__A, __N * 8);
}
__funline __m256i _mm256_srli_si256(__m256i __A, const int __N) {
  return (__m256i)__builtin_ia32_psrldqi256(__A, __N * 8);
}
#else
#define _mm256_bsrli_epi128(A, N) \
  ((__m256i)__builtin_ia32_psrldqi256((__m256i)(A), (int)(N)*8))
#define _mm256_srli_si256(A, N) \
  ((__m256i)__builtin_ia32_psrldqi256((__m256i)(A), (int)(N)*8))
#endif
__funline __m256i _mm256_srli_epi16(__m256i __A, int __B) {
  return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__A, __B);
}
__funline __m256i _mm256_srl_epi16(__m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_psrlw256((__v16hi)__A, (__v8hi)__B);
}
__funline __m256i _mm256_srli_epi32(__m256i __A, int __B) {
  return (__m256i)__builtin_ia32_psrldi256((__v8si)__A, __B);
}
__funline __m256i _mm256_srl_epi32(__m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_psrld256((__v8si)__A, (__v4si)__B);
}
__funline __m256i _mm256_srli_epi64(__m256i __A, int __B) {
  return (__m256i)__builtin_ia32_psrlqi256((__v4di)__A, __B);
}
__funline __m256i _mm256_srl_epi64(__m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_psrlq256((__v4di)__A, (__v2di)__B);
}
__funline __m256i _mm256_sub_epi8(__m256i __A, __m256i __B) {
  return (__m256i)((__v32qu)__A - (__v32qu)__B);
}
__funline __m256i _mm256_sub_epi16(__m256i __A, __m256i __B) {
  return (__m256i)((__v16hu)__A - (__v16hu)__B);
}
__funline __m256i _mm256_sub_epi32(__m256i __A, __m256i __B) {
  return (__m256i)((__v8su)__A - (__v8su)__B);
}
__funline __m256i _mm256_sub_epi64(__m256i __A, __m256i __B) {
  return (__m256i)((__v4du)__A - (__v4du)__B);
}
__funline __m256i _mm256_subs_epi8(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_psubsb256((__v32qi)__A, (__v32qi)__B);
}
__funline __m256i _mm256_subs_epi16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_psubsw256((__v16hi)__A, (__v16hi)__B);
}
__funline __m256i _mm256_subs_epu8(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_psubusb256((__v32qi)__A, (__v32qi)__B);
}
__funline __m256i _mm256_subs_epu16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_psubusw256((__v16hi)__A, (__v16hi)__B);
}
__funline __m256i _mm256_unpackhi_epi8(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_punpckhbw256((__v32qi)__A, (__v32qi)__B);
}
__funline __m256i _mm256_unpackhi_epi16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_punpckhwd256((__v16hi)__A, (__v16hi)__B);
}
__funline __m256i _mm256_unpackhi_epi32(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_punpckhdq256((__v8si)__A, (__v8si)__B);
}
__funline __m256i _mm256_unpackhi_epi64(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_punpckhqdq256((__v4di)__A, (__v4di)__B);
}
__funline __m256i _mm256_unpacklo_epi8(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_punpcklbw256((__v32qi)__A, (__v32qi)__B);
}
__funline __m256i _mm256_unpacklo_epi16(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_punpcklwd256((__v16hi)__A, (__v16hi)__B);
}
__funline __m256i _mm256_unpacklo_epi32(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_punpckldq256((__v8si)__A, (__v8si)__B);
}
__funline __m256i _mm256_unpacklo_epi64(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_punpcklqdq256((__v4di)__A, (__v4di)__B);
}
__funline __m256i _mm256_xor_si256(__m256i __A, __m256i __B) {
  return (__m256i)((__v4du)__A ^ (__v4du)__B);
}
__funline __m256i _mm256_stream_load_si256(__m256i const *__X) {
  return (__m256i)__builtin_ia32_movntdqa256((__v4di *)__X);
}
__funline __m128 _mm_broadcastss_ps(__m128 __X) {
  return (__m128)__builtin_ia32_vbroadcastss_ps((__v4sf)__X);
}
__funline __m256 _mm256_broadcastss_ps(__m128 __X) {
  return (__m256)__builtin_ia32_vbroadcastss_ps256((__v4sf)__X);
}
__funline __m256d _mm256_broadcastsd_pd(__m128d __X) {
  return (__m256d)__builtin_ia32_vbroadcastsd_pd256((__v2df)__X);
}
__funline __m256i _mm256_broadcastsi128_si256(__m128i __X) {
  return (__m256i)__builtin_ia32_vbroadcastsi256((__v2di)__X);
}
#ifdef __OPTIMIZE__
__funline __m128i _mm_blend_epi32(__m128i __X, __m128i __Y, const int __M) {
  return (__m128i)__builtin_ia32_pblendd128((__v4si)__X, (__v4si)__Y, __M);
}
#else
#define _mm_blend_epi32(X, Y, M)                            \
  ((__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(X), \
                                      (__v4si)(__m128i)(Y), (int)(M)))
#endif
#ifdef __OPTIMIZE__
__funline __m256i _mm256_blend_epi32(__m256i __X, __m256i __Y, const int __M) {
  return (__m256i)__builtin_ia32_pblendd256((__v8si)__X, (__v8si)__Y, __M);
}
#else
#define _mm256_blend_epi32(X, Y, M) \
((__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(X), \ (__v8si)(__m256i)(Y), (int)(M))) #endif __funline __m256i _mm256_broadcastb_epi8(__m128i __X) { return (__m256i)__builtin_ia32_pbroadcastb256((__v16qi)__X); } __funline __m256i _mm256_broadcastw_epi16(__m128i __X) { return (__m256i)__builtin_ia32_pbroadcastw256((__v8hi)__X); } __funline __m256i _mm256_broadcastd_epi32(__m128i __X) { return (__m256i)__builtin_ia32_pbroadcastd256((__v4si)__X); } __funline __m256i _mm256_broadcastq_epi64(__m128i __X) { return (__m256i)__builtin_ia32_pbroadcastq256((__v2di)__X); } __funline __m128i _mm_broadcastb_epi8(__m128i __X) { return (__m128i)__builtin_ia32_pbroadcastb128((__v16qi)__X); } __funline __m128i _mm_broadcastw_epi16(__m128i __X) { return (__m128i)__builtin_ia32_pbroadcastw128((__v8hi)__X); } __funline __m128i _mm_broadcastd_epi32(__m128i __X) { return (__m128i)__builtin_ia32_pbroadcastd128((__v4si)__X); } __funline __m128i _mm_broadcastq_epi64(__m128i __X) { return (__m128i)__builtin_ia32_pbroadcastq128((__v2di)__X); } __funline __m256i _mm256_permutevar8x32_epi32(__m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_permvarsi256((__v8si)__X, (__v8si)__Y); } #ifdef __OPTIMIZE__ __funline __m256d _mm256_permute4x64_pd(__m256d __X, const int __M) { return (__m256d)__builtin_ia32_permdf256((__v4df)__X, __M); } #else #define _mm256_permute4x64_pd(X, M) \ ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(X), (int)(M))) #endif __funline __m256 _mm256_permutevar8x32_ps(__m256 __X, __m256i __Y) { return (__m256)__builtin_ia32_permvarsf256((__v8sf)__X, (__v8si)__Y); } #ifdef __OPTIMIZE__ __funline __m256i _mm256_permute4x64_epi64(__m256i __X, const int __M) { return (__m256i)__builtin_ia32_permdi256((__v4di)__X, __M); } #else #define _mm256_permute4x64_epi64(X, M) \ ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(M))) #endif #ifdef __OPTIMIZE__ __funline __m256i _mm256_permute2x128_si256(__m256i __X, __m256i __Y, const int __M) { return (__m256i)__builtin_ia32_permti256((__v4di)__X, (__v4di)__Y, __M); } #else #define _mm256_permute2x128_si256(X, Y, M) \ ((__m256i)__builtin_ia32_permti256((__v4di)(__m256i)(X), \ (__v4di)(__m256i)(Y), (int)(M))) #endif #ifdef __OPTIMIZE__ __funline __m128i _mm256_extracti128_si256(__m256i __X, const int __M) { return (__m128i)__builtin_ia32_extract128i256((__v4di)__X, __M); } #else #define _mm256_extracti128_si256(X, M) \ ((__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(X), (int)(M))) #endif #ifdef __OPTIMIZE__ __funline __m256i _mm256_inserti128_si256(__m256i __X, __m128i __Y, const int __M) { return (__m256i)__builtin_ia32_insert128i256((__v4di)__X, (__v2di)__Y, __M); } #else #define _mm256_inserti128_si256(X, Y, M) \ ((__m256i)__builtin_ia32_insert128i256((__v4di)(__m256i)(X), \ (__v2di)(__m128i)(Y), (int)(M))) #endif __funline __m256i _mm256_maskload_epi32(int const *__X, __m256i __M) { return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M); } __funline __m256i _mm256_maskload_epi64(long long const *__X, __m256i __M) { return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M); } __funline __m128i _mm_maskload_epi32(int const *__X, __m128i __M) { return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M); } __funline __m128i _mm_maskload_epi64(long long const *__X, __m128i __M) { return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M); } __funline void _mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y) { __builtin_ia32_maskstored256((__v8si 
*)__X, (__v8si)__M, (__v8si)__Y); } __funline void _mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y) { __builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y); } __funline void _mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y) { __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y); } __funline void _mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y) { __builtin_ia32_maskstoreq((__v2di *)__X, (__v2di)__M, (__v2di)__Y); } __funline __m256i _mm256_sllv_epi32(__m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y); } __funline __m128i _mm_sllv_epi32(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y); } __funline __m256i _mm256_sllv_epi64(__m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y); } __funline __m128i _mm_sllv_epi64(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y); } __funline __m256i _mm256_srav_epi32(__m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y); } __funline __m128i _mm_srav_epi32(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y); } __funline __m256i _mm256_srlv_epi32(__m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y); } __funline __m128i _mm_srlv_epi32(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y); } __funline __m256i _mm256_srlv_epi64(__m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y); } __funline __m128i _mm_srlv_epi64(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y); } #ifdef __OPTIMIZE__ __funline __m128d _mm_i32gather_pd(double const *__base, __m128i __index, const int __scale) { __v2df __zero = _mm_setzero_pd(); __v2df __mask = _mm_cmpeq_pd(__zero, __zero); return (__m128d)__builtin_ia32_gathersiv2df(_mm_undefined_pd(), __base, (__v4si)__index, __mask, __scale); } __funline __m128d _mm_mask_i32gather_pd(__m128d __src, double const *__base, __m128i __index, __m128d __mask, const int __scale) { return (__m128d)__builtin_ia32_gathersiv2df( (__v2df)__src, __base, (__v4si)__index, (__v2df)__mask, __scale); } __funline __m256d _mm256_i32gather_pd(double const *__base, __m128i __index, const int __scale) { __v4df __zero = _mm256_setzero_pd(); __v4df __mask = _mm256_cmp_pd(__zero, __zero, _CMP_EQ_OQ); return (__m256d)__builtin_ia32_gathersiv4df(_mm256_undefined_pd(), __base, (__v4si)__index, __mask, __scale); } __funline __m256d _mm256_mask_i32gather_pd(__m256d __src, double const *__base, __m128i __index, __m256d __mask, const int __scale) { return (__m256d)__builtin_ia32_gathersiv4df( (__v4df)__src, __base, (__v4si)__index, (__v4df)__mask, __scale); } __funline __m128d _mm_i64gather_pd(double const *__base, __m128i __index, const int __scale) { __v2df __src = _mm_setzero_pd(); __v2df __mask = _mm_cmpeq_pd(__src, __src); return (__m128d)__builtin_ia32_gatherdiv2df(__src, __base, (__v2di)__index, __mask, __scale); } __funline __m128d _mm_mask_i64gather_pd(__m128d __src, double const *__base, __m128i __index, __m128d __mask, const int __scale) { return (__m128d)__builtin_ia32_gatherdiv2df( (__v2df)__src, __base, (__v2di)__index, (__v2df)__mask, __scale); } __funline __m256d _mm256_i64gather_pd(double const *__base, __m256i __index, const int __scale) { __v4df __src = _mm256_setzero_pd(); __v4df 
__mask = _mm256_cmp_pd(__src, __src, _CMP_EQ_OQ); return (__m256d)__builtin_ia32_gatherdiv4df(__src, __base, (__v4di)__index, __mask, __scale); } __funline __m256d _mm256_mask_i64gather_pd(__m256d __src, double const *__base, __m256i __index, __m256d __mask, const int __scale) { return (__m256d)__builtin_ia32_gatherdiv4df( (__v4df)__src, __base, (__v4di)__index, (__v4df)__mask, __scale); } __funline __m128 _mm_i32gather_ps(float const *__base, __m128i __index, const int __scale) { __v4sf __src = _mm_setzero_ps(); __v4sf __mask = _mm_cmpeq_ps(__src, __src); return (__m128)__builtin_ia32_gathersiv4sf(__src, __base, (__v4si)__index, __mask, __scale); } __funline __m128 _mm_mask_i32gather_ps(__m128 __src, float const *__base, __m128i __index, __m128 __mask, const int __scale) { return (__m128)__builtin_ia32_gathersiv4sf( (__v4sf)__src, __base, (__v4si)__index, (__v4sf)__mask, __scale); } __funline __m256 _mm256_i32gather_ps(float const *__base, __m256i __index, const int __scale) { __v8sf __src = _mm256_setzero_ps(); __v8sf __mask = _mm256_cmp_ps(__src, __src, _CMP_EQ_OQ); return (__m256)__builtin_ia32_gathersiv8sf(__src, __base, (__v8si)__index, __mask, __scale); } __funline __m256 _mm256_mask_i32gather_ps(__m256 __src, float const *__base, __m256i __index, __m256 __mask, const int __scale) { return (__m256)__builtin_ia32_gathersiv8sf( (__v8sf)__src, __base, (__v8si)__index, (__v8sf)__mask, __scale); } __funline __m128 _mm_i64gather_ps(float const *__base, __m128i __index, const int __scale) { __v4sf __src = _mm_setzero_ps(); __v4sf __mask = _mm_cmpeq_ps(__src, __src); return (__m128)__builtin_ia32_gatherdiv4sf(__src, __base, (__v2di)__index, __mask, __scale); } __funline __m128 _mm_mask_i64gather_ps(__m128 __src, float const *__base, __m128i __index, __m128 __mask, const int __scale) { return (__m128)__builtin_ia32_gatherdiv4sf( (__v4sf)__src, __base, (__v2di)__index, (__v4sf)__mask, __scale); } __funline __m128 _mm256_i64gather_ps(float const *__base, __m256i __index, const int __scale) { __v4sf __src = _mm_setzero_ps(); __v4sf __mask = _mm_cmpeq_ps(__src, __src); return (__m128)__builtin_ia32_gatherdiv4sf256(__src, __base, (__v4di)__index, __mask, __scale); } __funline __m128 _mm256_mask_i64gather_ps(__m128 __src, float const *__base, __m256i __index, __m128 __mask, const int __scale) { return (__m128)__builtin_ia32_gatherdiv4sf256( (__v4sf)__src, __base, (__v4di)__index, (__v4sf)__mask, __scale); } __funline __m128i _mm_i32gather_epi64(long long int const *__base, __m128i __index, const int __scale) { __v2di __src = __extension__(__v2di){0, 0}; __v2di __mask = __extension__(__v2di){~0, ~0}; return (__m128i)__builtin_ia32_gathersiv2di(__src, __base, (__v4si)__index, __mask, __scale); } __funline __m128i _mm_mask_i32gather_epi64(__m128i __src, long long int const *__base, __m128i __index, __m128i __mask, const int __scale) { return (__m128i)__builtin_ia32_gathersiv2di( (__v2di)__src, __base, (__v4si)__index, (__v2di)__mask, __scale); } __funline __m256i _mm256_i32gather_epi64(long long int const *__base, __m128i __index, const int __scale) { __v4di __src = __extension__(__v4di){0, 0, 0, 0}; __v4di __mask = __extension__(__v4di){~0, ~0, ~0, ~0}; return (__m256i)__builtin_ia32_gathersiv4di(__src, __base, (__v4si)__index, __mask, __scale); } __funline __m256i _mm256_mask_i32gather_epi64(__m256i __src, long long int const *__base, __m128i __index, __m256i __mask, const int __scale) { return (__m256i)__builtin_ia32_gathersiv4di( (__v4di)__src, __base, (__v4si)__index, (__v4di)__mask, __scale); } 
__funline __m128i _mm_i64gather_epi64(long long int const *__base, __m128i __index, const int __scale) { __v2di __src = __extension__(__v2di){0, 0}; __v2di __mask = __extension__(__v2di){~0, ~0}; return (__m128i)__builtin_ia32_gatherdiv2di(__src, __base, (__v2di)__index, __mask, __scale); } __funline __m128i _mm_mask_i64gather_epi64(__m128i __src, long long int const *__base, __m128i __index, __m128i __mask, const int __scale) { return (__m128i)__builtin_ia32_gatherdiv2di( (__v2di)__src, __base, (__v2di)__index, (__v2di)__mask, __scale); } __funline __m256i _mm256_i64gather_epi64(long long int const *__base, __m256i __index, const int __scale) { __v4di __src = __extension__(__v4di){0, 0, 0, 0}; __v4di __mask = __extension__(__v4di){~0, ~0, ~0, ~0}; return (__m256i)__builtin_ia32_gatherdiv4di(__src, __base, (__v4di)__index, __mask, __scale); } __funline __m256i _mm256_mask_i64gather_epi64(__m256i __src, long long int const *__base, __m256i __index, __m256i __mask, const int __scale) { return (__m256i)__builtin_ia32_gatherdiv4di( (__v4di)__src, __base, (__v4di)__index, (__v4di)__mask, __scale); } __funline __m128i _mm_i32gather_epi32(int const *__base, __m128i __index, const int __scale) { __v4si __src = __extension__(__v4si){0, 0, 0, 0}; __v4si __mask = __extension__(__v4si){~0, ~0, ~0, ~0}; return (__m128i)__builtin_ia32_gathersiv4si(__src, __base, (__v4si)__index, __mask, __scale); } __funline __m128i _mm_mask_i32gather_epi32(__m128i __src, int const *__base, __m128i __index, __m128i __mask, const int __scale) { return (__m128i)__builtin_ia32_gathersiv4si( (__v4si)__src, __base, (__v4si)__index, (__v4si)__mask, __scale); } __funline __m256i _mm256_i32gather_epi32(int const *__base, __m256i __index, const int __scale) { __v8si __src = __extension__(__v8si){0, 0, 0, 0, 0, 0, 0, 0}; __v8si __mask = __extension__(__v8si){~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0}; return (__m256i)__builtin_ia32_gathersiv8si(__src, __base, (__v8si)__index, __mask, __scale); } __funline __m256i _mm256_mask_i32gather_epi32(__m256i __src, int const *__base, __m256i __index, __m256i __mask, const int __scale) { return (__m256i)__builtin_ia32_gathersiv8si( (__v8si)__src, __base, (__v8si)__index, (__v8si)__mask, __scale); } __funline __m128i _mm_i64gather_epi32(int const *__base, __m128i __index, const int __scale) { __v4si __src = __extension__(__v4si){0, 0, 0, 0}; __v4si __mask = __extension__(__v4si){~0, ~0, ~0, ~0}; return (__m128i)__builtin_ia32_gatherdiv4si(__src, __base, (__v2di)__index, __mask, __scale); } __funline __m128i _mm_mask_i64gather_epi32(__m128i __src, int const *__base, __m128i __index, __m128i __mask, const int __scale) { return (__m128i)__builtin_ia32_gatherdiv4si( (__v4si)__src, __base, (__v2di)__index, (__v4si)__mask, __scale); } __funline __m128i _mm256_i64gather_epi32(int const *__base, __m256i __index, const int __scale) { __v4si __src = __extension__(__v4si){0, 0, 0, 0}; __v4si __mask = __extension__(__v4si){~0, ~0, ~0, ~0}; return (__m128i)__builtin_ia32_gatherdiv4si256(__src, __base, (__v4di)__index, __mask, __scale); } __funline __m128i _mm256_mask_i64gather_epi32(__m128i __src, int const *__base, __m256i __index, __m128i __mask, const int __scale) { return (__m128i)__builtin_ia32_gatherdiv4si256( (__v4si)__src, __base, (__v4di)__index, (__v4si)__mask, __scale); } #else /* __OPTIMIZE__ */ #define _mm_i32gather_pd(BASE, INDEX, SCALE) \ (__m128d) __builtin_ia32_gathersiv2df( \ (__v2df)_mm_setzero_pd(), (double const *)BASE, (__v4si)(__m128i)INDEX, \ (__v2df)_mm_set1_pd((double)(long long int)-1), 
(int)SCALE) #define _mm_mask_i32gather_pd(SRC, BASE, INDEX, MASK, SCALE) \ (__m128d) __builtin_ia32_gathersiv2df( \ (__v2df)(__m128d)SRC, (double const *)BASE, (__v4si)(__m128i)INDEX, \ (__v2df)(__m128d)MASK, (int)SCALE) #define _mm256_i32gather_pd(BASE, INDEX, SCALE) \ (__m256d) __builtin_ia32_gathersiv4df( \ (__v4df)_mm256_setzero_pd(), (double const *)BASE, \ (__v4si)(__m128i)INDEX, \ (__v4df)_mm256_set1_pd((double)(long long int)-1), (int)SCALE) #define _mm256_mask_i32gather_pd(SRC, BASE, INDEX, MASK, SCALE) \ (__m256d) __builtin_ia32_gathersiv4df( \ (__v4df)(__m256d)SRC, (double const *)BASE, (__v4si)(__m128i)INDEX, \ (__v4df)(__m256d)MASK, (int)SCALE) #define _mm_i64gather_pd(BASE, INDEX, SCALE) \ (__m128d) __builtin_ia32_gatherdiv2df( \ (__v2df)_mm_setzero_pd(), (double const *)BASE, (__v2di)(__m128i)INDEX, \ (__v2df)_mm_set1_pd((double)(long long int)-1), (int)SCALE) #define _mm_mask_i64gather_pd(SRC, BASE, INDEX, MASK, SCALE) \ (__m128d) __builtin_ia32_gatherdiv2df( \ (__v2df)(__m128d)SRC, (double const *)BASE, (__v2di)(__m128i)INDEX, \ (__v2df)(__m128d)MASK, (int)SCALE) #define _mm256_i64gather_pd(BASE, INDEX, SCALE) \ (__m256d) __builtin_ia32_gatherdiv4df( \ (__v4df)_mm256_setzero_pd(), (double const *)BASE, \ (__v4di)(__m256i)INDEX, \ (__v4df)_mm256_set1_pd((double)(long long int)-1), (int)SCALE) #define _mm256_mask_i64gather_pd(SRC, BASE, INDEX, MASK, SCALE) \ (__m256d) __builtin_ia32_gatherdiv4df( \ (__v4df)(__m256d)SRC, (double const *)BASE, (__v4di)(__m256i)INDEX, \ (__v4df)(__m256d)MASK, (int)SCALE) #define _mm_i32gather_ps(BASE, INDEX, SCALE) \ (__m128) __builtin_ia32_gathersiv4sf( \ (__v4sf)_mm_setzero_ps(), (float const *)BASE, (__v4si)(__m128i)INDEX, \ _mm_set1_ps((float)(int)-1), (int)SCALE) #define _mm_mask_i32gather_ps(SRC, BASE, INDEX, MASK, SCALE) \ (__m128) __builtin_ia32_gathersiv4sf( \ (__v4sf)(__m128d)SRC, (float const *)BASE, (__v4si)(__m128i)INDEX, \ (__v4sf)(__m128d)MASK, (int)SCALE) #define _mm256_i32gather_ps(BASE, INDEX, SCALE) \ (__m256) __builtin_ia32_gathersiv8sf( \ (__v8sf)_mm256_setzero_ps(), (float const *)BASE, \ (__v8si)(__m256i)INDEX, (__v8sf)_mm256_set1_ps((float)(int)-1), \ (int)SCALE) #define _mm256_mask_i32gather_ps(SRC, BASE, INDEX, MASK, SCALE) \ (__m256) __builtin_ia32_gathersiv8sf( \ (__v8sf)(__m256)SRC, (float const *)BASE, (__v8si)(__m256i)INDEX, \ (__v8sf)(__m256d)MASK, (int)SCALE) #define _mm_i64gather_ps(BASE, INDEX, SCALE) \ (__m128) __builtin_ia32_gatherdiv4sf( \ (__v4sf)_mm_setzero_pd(), (float const *)BASE, (__v2di)(__m128i)INDEX, \ (__v4sf)_mm_set1_ps((float)(int)-1), (int)SCALE) #define _mm_mask_i64gather_ps(SRC, BASE, INDEX, MASK, SCALE) \ (__m128) __builtin_ia32_gatherdiv4sf( \ (__v4sf)(__m128)SRC, (float const *)BASE, (__v2di)(__m128i)INDEX, \ (__v4sf)(__m128d)MASK, (int)SCALE) #define _mm256_i64gather_ps(BASE, INDEX, SCALE) \ (__m128) __builtin_ia32_gatherdiv4sf256( \ (__v4sf)_mm_setzero_ps(), (float const *)BASE, (__v4di)(__m256i)INDEX, \ (__v4sf)_mm_set1_ps((float)(int)-1), (int)SCALE) #define _mm256_mask_i64gather_ps(SRC, BASE, INDEX, MASK, SCALE) \ (__m128) __builtin_ia32_gatherdiv4sf256( \ (__v4sf)(__m128)SRC, (float const *)BASE, (__v4di)(__m256i)INDEX, \ (__v4sf)(__m128)MASK, (int)SCALE) #define _mm_i32gather_epi64(BASE, INDEX, SCALE) \ (__m128i) __builtin_ia32_gathersiv2di( \ (__v2di)_mm_setzero_si128(), (long long const *)BASE, \ (__v4si)(__m128i)INDEX, (__v2di)_mm_set1_epi64x(-1), (int)SCALE) #define _mm_mask_i32gather_epi64(SRC, BASE, INDEX, MASK, SCALE) \ (__m128i) __builtin_ia32_gathersiv2di( \ 
(__v2di)(__m128i)SRC, (long long const *)BASE, (__v4si)(__m128i)INDEX, \ (__v2di)(__m128i)MASK, (int)SCALE) #define _mm256_i32gather_epi64(BASE, INDEX, SCALE) \ (__m256i) __builtin_ia32_gathersiv4di( \ (__v4di)_mm256_setzero_si256(), (long long const *)BASE, \ (__v4si)(__m128i)INDEX, (__v4di)_mm256_set1_epi64x(-1), (int)SCALE) #define _mm256_mask_i32gather_epi64(SRC, BASE, INDEX, MASK, SCALE) \ (__m256i) __builtin_ia32_gathersiv4di( \ (__v4di)(__m256i)SRC, (long long const *)BASE, (__v4si)(__m128i)INDEX, \ (__v4di)(__m256i)MASK, (int)SCALE) #define _mm_i64gather_epi64(BASE, INDEX, SCALE) \ (__m128i) __builtin_ia32_gatherdiv2di( \ (__v2di)_mm_setzero_si128(), (long long const *)BASE, \ (__v2di)(__m128i)INDEX, (__v2di)_mm_set1_epi64x(-1), (int)SCALE) #define _mm_mask_i64gather_epi64(SRC, BASE, INDEX, MASK, SCALE) \ (__m128i) __builtin_ia32_gatherdiv2di( \ (__v2di)(__m128i)SRC, (long long const *)BASE, (__v2di)(__m128i)INDEX, \ (__v2di)(__m128i)MASK, (int)SCALE) #define _mm256_i64gather_epi64(BASE, INDEX, SCALE) \ (__m256i) __builtin_ia32_gatherdiv4di( \ (__v4di)_mm256_setzero_si256(), (long long const *)BASE, \ (__v4di)(__m256i)INDEX, (__v4di)_mm256_set1_epi64x(-1), (int)SCALE) #define _mm256_mask_i64gather_epi64(SRC, BASE, INDEX, MASK, SCALE) \ (__m256i) __builtin_ia32_gatherdiv4di( \ (__v4di)(__m256i)SRC, (long long const *)BASE, (__v4di)(__m256i)INDEX, \ (__v4di)(__m256i)MASK, (int)SCALE) #define _mm_i32gather_epi32(BASE, INDEX, SCALE) \ (__m128i) __builtin_ia32_gathersiv4si( \ (__v4si)_mm_setzero_si128(), (int const *)BASE, (__v4si)(__m128i)INDEX, \ (__v4si)_mm_set1_epi32(-1), (int)SCALE) #define _mm_mask_i32gather_epi32(SRC, BASE, INDEX, MASK, SCALE) \ (__m128i) __builtin_ia32_gathersiv4si( \ (__v4si)(__m128i)SRC, (int const *)BASE, (__v4si)(__m128i)INDEX, \ (__v4si)(__m128i)MASK, (int)SCALE) #define _mm256_i32gather_epi32(BASE, INDEX, SCALE) \ (__m256i) __builtin_ia32_gathersiv8si( \ (__v8si)_mm256_setzero_si256(), (int const *)BASE, \ (__v8si)(__m256i)INDEX, (__v8si)_mm256_set1_epi32(-1), (int)SCALE) #define _mm256_mask_i32gather_epi32(SRC, BASE, INDEX, MASK, SCALE) \ (__m256i) __builtin_ia32_gathersiv8si( \ (__v8si)(__m256i)SRC, (int const *)BASE, (__v8si)(__m256i)INDEX, \ (__v8si)(__m256i)MASK, (int)SCALE) #define _mm_i64gather_epi32(BASE, INDEX, SCALE) \ (__m128i) __builtin_ia32_gatherdiv4si( \ (__v4si)_mm_setzero_si128(), (int const *)BASE, (__v2di)(__m128i)INDEX, \ (__v4si)_mm_set1_epi32(-1), (int)SCALE) #define _mm_mask_i64gather_epi32(SRC, BASE, INDEX, MASK, SCALE) \ (__m128i) __builtin_ia32_gatherdiv4si( \ (__v4si)(__m128i)SRC, (int const *)BASE, (__v2di)(__m128i)INDEX, \ (__v4si)(__m128i)MASK, (int)SCALE) #define _mm256_i64gather_epi32(BASE, INDEX, SCALE) \ (__m128i) __builtin_ia32_gatherdiv4si256( \ (__v4si)_mm_setzero_si128(), (int const *)BASE, (__v4di)(__m256i)INDEX, \ (__v4si)_mm_set1_epi32(-1), (int)SCALE) #define _mm256_mask_i64gather_epi32(SRC, BASE, INDEX, MASK, SCALE) \ (__m128i) __builtin_ia32_gatherdiv4si256( \ (__v4si)(__m128i)SRC, (int const *)BASE, (__v4di)(__m256i)INDEX, \ (__v4si)(__m128i)MASK, (int)SCALE) #endif /* __OPTIMIZE__ */ #ifdef __DISABLE_AVX2__ #undef __DISABLE_AVX2__ #pragma GCC pop_options #endif /* __DISABLE_AVX2__ */ #endif /* _AVX2INTRIN_H_INCLUDED */
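/* Example (illustrative sketch; gather_example and table are hypothetical
   names, not part of this header): an unmasked gather of eight ints through
   an index vector. The scale argument is in bytes, so 4 strides over int
   elements, and every index must stay inside the table. Build with -mavx2. */
static inline __m256i gather_example(int const *table) {
  __m256i idx = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);
  return _mm256_i32gather_epi32(table, idx, 4);
}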
44,864
1,141
jart/cosmopolitan
false
cosmopolitan/third_party/intel/avx512vnniintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED
#error "Never use <avx512vnniintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef __AVX512VNNIINTRIN_H_INCLUDED
#define __AVX512VNNIINTRIN_H_INCLUDED
#if !defined(__AVX512VNNI__)
#pragma GCC push_options
#pragma GCC target("avx512vnni")
#define __DISABLE_AVX512VNNI__
#endif /* __AVX512VNNI__ */
__funline __m512i _mm512_dpbusd_epi32(__m512i __A, __m512i __B, __m512i __C) {
  return (__m512i)__builtin_ia32_vpdpbusd_v16si((__v16si)__A, (__v16si)__B,
                                                (__v16si)__C);
}
__funline __m512i _mm512_mask_dpbusd_epi32(__m512i __A, __mmask16 __B,
                                           __m512i __C, __m512i __D) {
  return (__m512i)__builtin_ia32_vpdpbusd_v16si_mask(
      (__v16si)__A, (__v16si)__C, (__v16si)__D, (__mmask16)__B);
}
__funline __m512i _mm512_maskz_dpbusd_epi32(__mmask16 __A, __m512i __B,
                                            __m512i __C, __m512i __D) {
  return (__m512i)__builtin_ia32_vpdpbusd_v16si_maskz(
      (__v16si)__B, (__v16si)__C, (__v16si)__D, (__mmask16)__A);
}
__funline __m512i _mm512_dpbusds_epi32(__m512i __A, __m512i __B, __m512i __C) {
  return (__m512i)__builtin_ia32_vpdpbusds_v16si((__v16si)__A, (__v16si)__B,
                                                 (__v16si)__C);
}
__funline __m512i _mm512_mask_dpbusds_epi32(__m512i __A, __mmask16 __B,
                                            __m512i __C, __m512i __D) {
  return (__m512i)__builtin_ia32_vpdpbusds_v16si_mask(
      (__v16si)__A, (__v16si)__C, (__v16si)__D, (__mmask16)__B);
}
__funline __m512i _mm512_maskz_dpbusds_epi32(__mmask16 __A, __m512i __B,
                                             __m512i __C, __m512i __D) {
  return (__m512i)__builtin_ia32_vpdpbusds_v16si_maskz(
      (__v16si)__B, (__v16si)__C, (__v16si)__D, (__mmask16)__A);
}
__funline __m512i _mm512_dpwssd_epi32(__m512i __A, __m512i __B, __m512i __C) {
  return (__m512i)__builtin_ia32_vpdpwssd_v16si((__v16si)__A, (__v16si)__B,
                                                (__v16si)__C);
}
__funline __m512i _mm512_mask_dpwssd_epi32(__m512i __A, __mmask16 __B,
                                           __m512i __C, __m512i __D) {
  return (__m512i)__builtin_ia32_vpdpwssd_v16si_mask(
      (__v16si)__A, (__v16si)__C, (__v16si)__D, (__mmask16)__B);
}
__funline __m512i _mm512_maskz_dpwssd_epi32(__mmask16 __A, __m512i __B,
                                            __m512i __C, __m512i __D) {
  return (__m512i)__builtin_ia32_vpdpwssd_v16si_maskz(
      (__v16si)__B, (__v16si)__C, (__v16si)__D, (__mmask16)__A);
}
__funline __m512i _mm512_dpwssds_epi32(__m512i __A, __m512i __B, __m512i __C) {
  return (__m512i)__builtin_ia32_vpdpwssds_v16si((__v16si)__A, (__v16si)__B,
                                                 (__v16si)__C);
}
__funline __m512i _mm512_mask_dpwssds_epi32(__m512i __A, __mmask16 __B,
                                            __m512i __C, __m512i __D) {
  return (__m512i)__builtin_ia32_vpdpwssds_v16si_mask(
      (__v16si)__A, (__v16si)__C, (__v16si)__D, (__mmask16)__B);
}
__funline __m512i _mm512_maskz_dpwssds_epi32(__mmask16 __A, __m512i __B,
                                             __m512i __C, __m512i __D) {
  return (__m512i)__builtin_ia32_vpdpwssds_v16si_maskz(
      (__v16si)__B, (__v16si)__C, (__v16si)__D, (__mmask16)__A);
}
#ifdef __DISABLE_AVX512VNNI__
#undef __DISABLE_AVX512VNNI__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512VNNI__ */
#endif /* __AVX512VNNIINTRIN_H_INCLUDED */
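/* Example (illustrative sketch; vnni_accumulate is a hypothetical name):
   one VNNI step. _mm512_dpbusd_epi32 multiplies the unsigned bytes of u8s
   with the signed bytes of s8s, sums each group of four products, and adds
   the sums into the int32 lanes of acc, replacing the usual
   maddubs/madd/add sequence. Build with -mavx512vnni. */
static inline __m512i vnni_accumulate(__m512i acc, __m512i u8s, __m512i s8s) {
  return _mm512_dpbusd_epi32(acc, u8s, s8s);
}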
3,517
88
jart/cosmopolitan
false
cosmopolitan/third_party/intel/waitpkgintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED
#error "Never use <waitpkgintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef _WAITPKG_H_INCLUDED
#define _WAITPKG_H_INCLUDED
#ifndef __WAITPKG__
#pragma GCC push_options
#pragma GCC target("waitpkg")
#define __DISABLE_WAITPKG__
#endif /* __WAITPKG__ */
__funline void _umonitor(void *__A) {
  __builtin_ia32_umonitor(__A);
}
__funline unsigned char _umwait(unsigned int __A, unsigned long long __B) {
  return __builtin_ia32_umwait(__A, __B);
}
__funline unsigned char _tpause(unsigned int __A, unsigned long long __B) {
  return __builtin_ia32_tpause(__A, __B);
}
#ifdef __DISABLE_WAITPKG__
#undef __DISABLE_WAITPKG__
#pragma GCC pop_options
#endif /* __DISABLE_WAITPKG__ */
#endif /* _WAITPKG_H_INCLUDED. */
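/* Example (illustrative sketch; wait_for_flag and the deadline value are
   hypothetical): waiting for a flag without spinning. A control value of 0
   permits the deeper C0.2 state; _umwait returns when a store hits the
   monitored line, the TSC deadline passes, or the OS time limit fires (the
   carry-flag return distinguishes the cases; see the SDM for exact rules). */
static inline void wait_for_flag(volatile int *flag,
                                 unsigned long long tsc_deadline) {
  while (!*flag) {
    _umonitor((void *)flag);  /* arm monitoring on the flag's cache line */
    if (*flag) break;         /* re-check: a store may race the arming   */
    _umwait(0, tsc_deadline); /* sleep until store, deadline, or limit   */
  }
}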
769
32
jart/cosmopolitan
false
cosmopolitan/third_party/intel/avx512vpopcntdqintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED
#error \
    "Never use <avx512vpopcntdqintrin.h> directly; include <x86intrin.h> instead."
#endif
#ifndef _AVX512VPOPCNTDQINTRIN_H_INCLUDED
#define _AVX512VPOPCNTDQINTRIN_H_INCLUDED
#ifndef __AVX512VPOPCNTDQ__
#pragma GCC push_options
#pragma GCC target("avx512vpopcntdq")
#define __DISABLE_AVX512VPOPCNTDQ__
#endif /* __AVX512VPOPCNTDQ__ */
__funline __m512i _mm512_popcnt_epi32(__m512i __A) {
  return (__m512i)__builtin_ia32_vpopcountd_v16si((__v16si)__A);
}
__funline __m512i _mm512_mask_popcnt_epi32(__m512i __A, __mmask16 __U,
                                           __m512i __B) {
  return (__m512i)__builtin_ia32_vpopcountd_v16si_mask(
      (__v16si)__A, (__v16si)__B, (__mmask16)__U);
}
__funline __m512i _mm512_maskz_popcnt_epi32(__mmask16 __U, __m512i __A) {
  return (__m512i)__builtin_ia32_vpopcountd_v16si_mask(
      (__v16si)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U);
}
__funline __m512i _mm512_popcnt_epi64(__m512i __A) {
  return (__m512i)__builtin_ia32_vpopcountq_v8di((__v8di)__A);
}
__funline __m512i _mm512_mask_popcnt_epi64(__m512i __A, __mmask8 __U,
                                           __m512i __B) {
  return (__m512i)__builtin_ia32_vpopcountq_v8di_mask((__v8di)__A, (__v8di)__B,
                                                      (__mmask8)__U);
}
__funline __m512i _mm512_maskz_popcnt_epi64(__mmask8 __U, __m512i __A) {
  return (__m512i)__builtin_ia32_vpopcountq_v8di_mask(
      (__v8di)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}
#ifdef __DISABLE_AVX512VPOPCNTDQ__
#undef __DISABLE_AVX512VPOPCNTDQ__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512VPOPCNTDQ__ */
#endif /* _AVX512VPOPCNTDQINTRIN_H_INCLUDED */
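/* Example (illustrative sketch; count_bits_512 is a hypothetical name, and
   it assumes the avx512f reduction helper _mm512_reduce_add_epi32 is
   available from the headers immintrin.h pulls in first): per-lane bit
   counts followed by a horizontal sum. Build with -mavx512vpopcntdq. */
static inline int count_bits_512(__m512i v) {
  return _mm512_reduce_add_epi32(_mm512_popcnt_epi32(v));
}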
1,721
51
jart/cosmopolitan
false
cosmopolitan/third_party/intel/xmmintrin.internal.h
#ifndef _XMMINTRIN_H_INCLUDED #define _XMMINTRIN_H_INCLUDED #ifdef __x86_64__ #include "third_party/intel/mm_malloc.internal.h" #include "third_party/intel/mmintrin.internal.h" enum _mm_hint { _MM_HINT_ET0 = 7, _MM_HINT_ET1 = 6, _MM_HINT_T0 = 3, _MM_HINT_T1 = 2, _MM_HINT_T2 = 1, _MM_HINT_NTA = 0 }; #ifdef __OPTIMIZE__ __funline void _mm_prefetch(const void *__P, enum _mm_hint __I) { __builtin_prefetch(__P, (__I & 0x4) >> 2, __I & 0x3); } #else #define _mm_prefetch(P, I) __builtin_prefetch((P), ((I & 0x4) >> 2), (I & 0x3)) #endif #ifndef __SSE__ #pragma GCC push_options #pragma GCC target("sse") #define __DISABLE_SSE__ #endif /* __SSE__ */ typedef float __m128 __attribute__((__vector_size__(16), __may_alias__)); typedef float __m128_u __attribute__((__vector_size__(16), __may_alias__, __aligned__(1))); typedef float __v4sf __attribute__((__vector_size__(16))); #define _MM_SHUFFLE(fp3, fp2, fp1, fp0) \ (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0)) #define _MM_EXCEPT_MASK 0x003f #define _MM_EXCEPT_INVALID 0x0001 #define _MM_EXCEPT_DENORM 0x0002 #define _MM_EXCEPT_DIV_ZERO 0x0004 #define _MM_EXCEPT_OVERFLOW 0x0008 #define _MM_EXCEPT_UNDERFLOW 0x0010 #define _MM_EXCEPT_INEXACT 0x0020 #define _MM_MASK_MASK 0x1f80 #define _MM_MASK_INVALID 0x0080 #define _MM_MASK_DENORM 0x0100 #define _MM_MASK_DIV_ZERO 0x0200 #define _MM_MASK_OVERFLOW 0x0400 #define _MM_MASK_UNDERFLOW 0x0800 #define _MM_MASK_INEXACT 0x1000 #define _MM_ROUND_MASK 0x6000 #define _MM_ROUND_NEAREST 0x0000 #define _MM_ROUND_DOWN 0x2000 #define _MM_ROUND_UP 0x4000 #define _MM_ROUND_TOWARD_ZERO 0x6000 #define _MM_FLUSH_ZERO_MASK 0x8000 #define _MM_FLUSH_ZERO_ON 0x8000 #define _MM_FLUSH_ZERO_OFF 0x0000 __funline __m128 _mm_undefined_ps(void) { __m128 __Y = __Y; return __Y; } __funline __m128 _mm_setzero_ps(void) { return __extension__(__m128){0.0f, 0.0f, 0.0f, 0.0f}; } __funline __m128 _mm_add_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_addss((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_sub_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_subss((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_mul_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_mulss((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_div_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_divss((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_sqrt_ss(__m128 __A) { return (__m128)__builtin_ia32_sqrtss((__v4sf)__A); } __funline __m128 _mm_rcp_ss(__m128 __A) { return (__m128)__builtin_ia32_rcpss((__v4sf)__A); } __funline __m128 _mm_rsqrt_ss(__m128 __A) { return (__m128)__builtin_ia32_rsqrtss((__v4sf)__A); } __funline __m128 _mm_min_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_minss((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_max_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_maxss((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_add_ps(__m128 __A, __m128 __B) { return (__m128)((__v4sf)__A + (__v4sf)__B); } __funline __m128 _mm_sub_ps(__m128 __A, __m128 __B) { return (__m128)((__v4sf)__A - (__v4sf)__B); } __funline __m128 _mm_mul_ps(__m128 __A, __m128 __B) { return (__m128)((__v4sf)__A * (__v4sf)__B); } __funline __m128 _mm_div_ps(__m128 __A, __m128 __B) { return (__m128)((__v4sf)__A / (__v4sf)__B); } __funline __m128 _mm_sqrt_ps(__m128 __A) { return (__m128)__builtin_ia32_sqrtps((__v4sf)__A); } __funline __m128 _mm_rcp_ps(__m128 __A) { return (__m128)__builtin_ia32_rcpps((__v4sf)__A); } __funline __m128 _mm_rsqrt_ps(__m128 __A) { return (__m128)__builtin_ia32_rsqrtps((__v4sf)__A); } 
__funline __m128 _mm_min_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_minps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_max_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_maxps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_and_ps(__m128 __A, __m128 __B) { return __builtin_ia32_andps(__A, __B); } __funline __m128 _mm_andnot_ps(__m128 __A, __m128 __B) { return __builtin_ia32_andnps(__A, __B); } __funline __m128 _mm_or_ps(__m128 __A, __m128 __B) { return __builtin_ia32_orps(__A, __B); } __funline __m128 _mm_xor_ps(__m128 __A, __m128 __B) { return __builtin_ia32_xorps(__A, __B); } __funline __m128 _mm_cmpeq_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpeqss((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmplt_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpltss((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmple_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpless((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmpgt_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_movss( (__v4sf)__A, (__v4sf)__builtin_ia32_cmpltss((__v4sf)__B, (__v4sf)__A)); } __funline __m128 _mm_cmpge_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_movss( (__v4sf)__A, (__v4sf)__builtin_ia32_cmpless((__v4sf)__B, (__v4sf)__A)); } __funline __m128 _mm_cmpneq_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpneqss((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmpnlt_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpnltss((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmpnle_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpnless((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmpngt_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_movss( (__v4sf)__A, (__v4sf)__builtin_ia32_cmpnltss((__v4sf)__B, (__v4sf)__A)); } __funline __m128 _mm_cmpnge_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_movss( (__v4sf)__A, (__v4sf)__builtin_ia32_cmpnless((__v4sf)__B, (__v4sf)__A)); } __funline __m128 _mm_cmpord_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpordss((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmpunord_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpunordss((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmpeq_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpeqps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmplt_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpltps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmple_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpleps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmpgt_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpgtps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmpge_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpgeps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmpneq_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpneqps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmpnlt_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpnltps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmpnle_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpnleps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmpngt_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpngtps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmpnge_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpngeps((__v4sf)__A, (__v4sf)__B); } __funline __m128 
_mm_cmpord_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpordps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_cmpunord_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_cmpunordps((__v4sf)__A, (__v4sf)__B); } __funline int _mm_comieq_ss(__m128 __A, __m128 __B) { return __builtin_ia32_comieq((__v4sf)__A, (__v4sf)__B); } __funline int _mm_comilt_ss(__m128 __A, __m128 __B) { return __builtin_ia32_comilt((__v4sf)__A, (__v4sf)__B); } __funline int _mm_comile_ss(__m128 __A, __m128 __B) { return __builtin_ia32_comile((__v4sf)__A, (__v4sf)__B); } __funline int _mm_comigt_ss(__m128 __A, __m128 __B) { return __builtin_ia32_comigt((__v4sf)__A, (__v4sf)__B); } __funline int _mm_comige_ss(__m128 __A, __m128 __B) { return __builtin_ia32_comige((__v4sf)__A, (__v4sf)__B); } __funline int _mm_comineq_ss(__m128 __A, __m128 __B) { return __builtin_ia32_comineq((__v4sf)__A, (__v4sf)__B); } __funline int _mm_ucomieq_ss(__m128 __A, __m128 __B) { return __builtin_ia32_ucomieq((__v4sf)__A, (__v4sf)__B); } __funline int _mm_ucomilt_ss(__m128 __A, __m128 __B) { return __builtin_ia32_ucomilt((__v4sf)__A, (__v4sf)__B); } __funline int _mm_ucomile_ss(__m128 __A, __m128 __B) { return __builtin_ia32_ucomile((__v4sf)__A, (__v4sf)__B); } __funline int _mm_ucomigt_ss(__m128 __A, __m128 __B) { return __builtin_ia32_ucomigt((__v4sf)__A, (__v4sf)__B); } __funline int _mm_ucomige_ss(__m128 __A, __m128 __B) { return __builtin_ia32_ucomige((__v4sf)__A, (__v4sf)__B); } __funline int _mm_ucomineq_ss(__m128 __A, __m128 __B) { return __builtin_ia32_ucomineq((__v4sf)__A, (__v4sf)__B); } __funline int _mm_cvtss_si32(__m128 __A) { return __builtin_ia32_cvtss2si((__v4sf)__A); } __funline int _mm_cvt_ss2si(__m128 __A) { return _mm_cvtss_si32(__A); } #ifdef __x86_64__ __funline long long _mm_cvtss_si64(__m128 __A) { return __builtin_ia32_cvtss2si64((__v4sf)__A); } __funline long long _mm_cvtss_si64x(__m128 __A) { return __builtin_ia32_cvtss2si64((__v4sf)__A); } #endif __funline __m64 _mm_cvtps_pi32(__m128 __A) { return (__m64)__builtin_ia32_cvtps2pi((__v4sf)__A); } __funline __m64 _mm_cvt_ps2pi(__m128 __A) { return _mm_cvtps_pi32(__A); } __funline int _mm_cvttss_si32(__m128 __A) { return __builtin_ia32_cvttss2si((__v4sf)__A); } __funline int _mm_cvtt_ss2si(__m128 __A) { return _mm_cvttss_si32(__A); } #ifdef __x86_64__ __funline long long _mm_cvttss_si64(__m128 __A) { return __builtin_ia32_cvttss2si64((__v4sf)__A); } __funline long long _mm_cvttss_si64x(__m128 __A) { return __builtin_ia32_cvttss2si64((__v4sf)__A); } #endif __funline __m64 _mm_cvttps_pi32(__m128 __A) { return (__m64)__builtin_ia32_cvttps2pi((__v4sf)__A); } __funline __m64 _mm_cvtt_ps2pi(__m128 __A) { return _mm_cvttps_pi32(__A); } __funline __m128 _mm_cvtsi32_ss(__m128 __A, int __B) { return (__m128)__builtin_ia32_cvtsi2ss((__v4sf)__A, __B); } __funline __m128 _mm_cvt_si2ss(__m128 __A, int __B) { return _mm_cvtsi32_ss(__A, __B); } #ifdef __x86_64__ __funline __m128 _mm_cvtsi64_ss(__m128 __A, long long __B) { return (__m128)__builtin_ia32_cvtsi642ss((__v4sf)__A, __B); } __funline __m128 _mm_cvtsi64x_ss(__m128 __A, long long __B) { return (__m128)__builtin_ia32_cvtsi642ss((__v4sf)__A, __B); } #endif __funline __m128 _mm_cvtpi32_ps(__m128 __A, __m64 __B) { return (__m128)__builtin_ia32_cvtpi2ps((__v4sf)__A, (__v2si)__B); } __funline __m128 _mm_cvt_pi2ps(__m128 __A, __m64 __B) { return _mm_cvtpi32_ps(__A, __B); } __funline __m128 _mm_cvtpi16_ps(__m64 __A) { __v4hi __sign; __v2si __hisi, __losi; __v4sf __zero, __ra, __rb; __sign = 
__builtin_ia32_pcmpgtw((__v4hi)0LL, (__v4hi)__A); __losi = (__v2si)__builtin_ia32_punpcklwd((__v4hi)__A, __sign); __hisi = (__v2si)__builtin_ia32_punpckhwd((__v4hi)__A, __sign); __zero = (__v4sf)_mm_setzero_ps(); __ra = __builtin_ia32_cvtpi2ps(__zero, __losi); __rb = __builtin_ia32_cvtpi2ps(__ra, __hisi); return (__m128)__builtin_ia32_movlhps(__ra, __rb); } __funline __m128 _mm_cvtpu16_ps(__m64 __A) { __v2si __hisi, __losi; __v4sf __zero, __ra, __rb; __losi = (__v2si)__builtin_ia32_punpcklwd((__v4hi)__A, (__v4hi)0LL); __hisi = (__v2si)__builtin_ia32_punpckhwd((__v4hi)__A, (__v4hi)0LL); __zero = (__v4sf)_mm_setzero_ps(); __ra = __builtin_ia32_cvtpi2ps(__zero, __losi); __rb = __builtin_ia32_cvtpi2ps(__ra, __hisi); return (__m128)__builtin_ia32_movlhps(__ra, __rb); } __funline __m128 _mm_cvtpi8_ps(__m64 __A) { __v8qi __sign; __sign = __builtin_ia32_pcmpgtb((__v8qi)0LL, (__v8qi)__A); __A = (__m64)__builtin_ia32_punpcklbw((__v8qi)__A, __sign); return _mm_cvtpi16_ps(__A); } __funline __m128 _mm_cvtpu8_ps(__m64 __A) { __A = (__m64)__builtin_ia32_punpcklbw((__v8qi)__A, (__v8qi)0LL); return _mm_cvtpu16_ps(__A); } __funline __m128 _mm_cvtpi32x2_ps(__m64 __A, __m64 __B) { __v4sf __zero = (__v4sf)_mm_setzero_ps(); __v4sf __sfa = __builtin_ia32_cvtpi2ps(__zero, (__v2si)__A); __v4sf __sfb = __builtin_ia32_cvtpi2ps(__sfa, (__v2si)__B); return (__m128)__builtin_ia32_movlhps(__sfa, __sfb); } __funline __m64 _mm_cvtps_pi16(__m128 __A) { __v4sf __hisf = (__v4sf)__A; __v4sf __losf = __builtin_ia32_movhlps(__hisf, __hisf); __v2si __hisi = __builtin_ia32_cvtps2pi(__hisf); __v2si __losi = __builtin_ia32_cvtps2pi(__losf); return (__m64)__builtin_ia32_packssdw(__hisi, __losi); } __funline __m64 _mm_cvtps_pi8(__m128 __A) { __v4hi __tmp = (__v4hi)_mm_cvtps_pi16(__A); return (__m64)__builtin_ia32_packsswb(__tmp, (__v4hi)0LL); } #ifdef __OPTIMIZE__ __funline __m128 _mm_shuffle_ps(__m128 __A, __m128 __B, int const __mask) { return (__m128)__builtin_ia32_shufps((__v4sf)__A, (__v4sf)__B, __mask); } #else #define _mm_shuffle_ps(A, B, MASK) \ ((__m128)__builtin_ia32_shufps((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \ (int)(MASK))) #endif __funline __m128 _mm_unpackhi_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_unpckhps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_unpacklo_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_unpcklps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_loadh_pi(__m128 __A, __m64 const *__P) { return (__m128)__builtin_ia32_loadhps((__v4sf)__A, (const __v2sf *)__P); } __funline void _mm_storeh_pi(__m64 *__P, __m128 __A) { __builtin_ia32_storehps((__v2sf *)__P, (__v4sf)__A); } __funline __m128 _mm_movehl_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_movhlps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_movelh_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_movlhps((__v4sf)__A, (__v4sf)__B); } __funline __m128 _mm_loadl_pi(__m128 __A, __m64 const *__P) { return (__m128)__builtin_ia32_loadlps((__v4sf)__A, (const __v2sf *)__P); } __funline void _mm_storel_pi(__m64 *__P, __m128 __A) { __builtin_ia32_storelps((__v2sf *)__P, (__v4sf)__A); } __funline int _mm_movemask_ps(__m128 __A) { return __builtin_ia32_movmskps((__v4sf)__A); } __funline unsigned int _mm_getcsr(void) { return __builtin_ia32_stmxcsr(); } __funline unsigned int _MM_GET_EXCEPTION_STATE(void) { return _mm_getcsr() & _MM_EXCEPT_MASK; } __funline unsigned int _MM_GET_EXCEPTION_MASK(void) { return _mm_getcsr() & _MM_MASK_MASK; } __funline unsigned int _MM_GET_ROUNDING_MODE(void) { return 
_mm_getcsr() & _MM_ROUND_MASK; } __funline unsigned int _MM_GET_FLUSH_ZERO_MODE(void) { return _mm_getcsr() & _MM_FLUSH_ZERO_MASK; } __funline void _mm_setcsr(unsigned int __I) { __builtin_ia32_ldmxcsr(__I); } __funline void _MM_SET_EXCEPTION_STATE(unsigned int __mask) { _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask); } __funline void _MM_SET_EXCEPTION_MASK(unsigned int __mask) { _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask); } __funline void _MM_SET_ROUNDING_MODE(unsigned int __mode) { _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode); } __funline void _MM_SET_FLUSH_ZERO_MODE(unsigned int __mode) { _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode); } __funline __m128 _mm_set_ss(float __F) { return __extension__(__m128)(__v4sf){__F, 0.0f, 0.0f, 0.0f}; } __funline __m128 _mm_set1_ps(float __F) { return __extension__(__m128)(__v4sf){__F, __F, __F, __F}; } __funline __m128 _mm_set_ps1(float __F) { return _mm_set1_ps(__F); } __funline __m128 _mm_load_ss(float const *__P) { return _mm_set_ss(*__P); } __funline __m128 _mm_load1_ps(float const *__P) { return _mm_set1_ps(*__P); } __funline __m128 _mm_load_ps1(float const *__P) { return _mm_load1_ps(__P); } __funline __m128 _mm_load_ps(float const *__P) { return *(__m128 *)__P; } __funline __m128 _mm_loadu_ps(float const *__P) { return *(__m128_u *)__P; } __funline __m128 _mm_loadr_ps(float const *__P) { __v4sf __tmp = *(__v4sf *)__P; return (__m128)__builtin_ia32_shufps(__tmp, __tmp, _MM_SHUFFLE(0, 1, 2, 3)); } __funline __m128 _mm_set_ps(const float __Z, const float __Y, const float __X, const float __W) { return __extension__(__m128)(__v4sf){__W, __X, __Y, __Z}; } __funline __m128 _mm_setr_ps(float __Z, float __Y, float __X, float __W) { return __extension__(__m128)(__v4sf){__Z, __Y, __X, __W}; } __funline void _mm_store_ss(float *__P, __m128 __A) { *__P = ((__v4sf)__A)[0]; } __funline float _mm_cvtss_f32(__m128 __A) { return ((__v4sf)__A)[0]; } __funline void _mm_store_ps(float *__P, __m128 __A) { *(__m128 *)__P = __A; } __funline void _mm_storeu_ps(float *__P, __m128 __A) { *(__m128_u *)__P = __A; } __funline void _mm_store1_ps(float *__P, __m128 __A) { __v4sf __va = (__v4sf)__A; __v4sf __tmp = __builtin_ia32_shufps(__va, __va, _MM_SHUFFLE(0, 0, 0, 0)); _mm_storeu_ps(__P, __tmp); } __funline void _mm_store_ps1(float *__P, __m128 __A) { _mm_store1_ps(__P, __A); } __funline void _mm_storer_ps(float *__P, __m128 __A) { __v4sf __va = (__v4sf)__A; __v4sf __tmp = __builtin_ia32_shufps(__va, __va, _MM_SHUFFLE(0, 1, 2, 3)); _mm_store_ps(__P, __tmp); } __funline __m128 _mm_move_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_shuffle( (__v4sf)__A, (__v4sf)__B, __extension__(__attribute__((__vector_size__(16))) int){4, 1, 2, 3}); } #ifdef __OPTIMIZE__ __funline int _mm_extract_pi16(__m64 const __A, int const __N) { return __builtin_ia32_vec_ext_v4hi((__v4hi)__A, __N); } __funline int _m_pextrw(__m64 const __A, int const __N) { return _mm_extract_pi16(__A, __N); } #else #define _mm_extract_pi16(A, N) \ ((int)__builtin_ia32_vec_ext_v4hi((__v4hi)(__m64)(A), (int)(N))) #define _m_pextrw(A, N) _mm_extract_pi16(A, N) #endif #ifdef __OPTIMIZE__ __funline __m64 _mm_insert_pi16(__m64 const __A, int const __D, int const __N) { return (__m64)__builtin_ia32_vec_set_v4hi((__v4hi)__A, __D, __N); } __funline __m64 _m_pinsrw(__m64 const __A, int const __D, int const __N) { return _mm_insert_pi16(__A, __D, __N); } #else #define _mm_insert_pi16(A, D, N) \ ((__m64)__builtin_ia32_vec_set_v4hi((__v4hi)(__m64)(A), (int)(D), (int)(N))) #define 
_m_pinsrw(A, D, N) _mm_insert_pi16(A, D, N) #endif __funline __m64 _mm_max_pi16(__m64 __A, __m64 __B) { return (__m64)__builtin_ia32_pmaxsw((__v4hi)__A, (__v4hi)__B); } __funline __m64 _m_pmaxsw(__m64 __A, __m64 __B) { return _mm_max_pi16(__A, __B); } __funline __m64 _mm_max_pu8(__m64 __A, __m64 __B) { return (__m64)__builtin_ia32_pmaxub((__v8qi)__A, (__v8qi)__B); } __funline __m64 _m_pmaxub(__m64 __A, __m64 __B) { return _mm_max_pu8(__A, __B); } __funline __m64 _mm_min_pi16(__m64 __A, __m64 __B) { return (__m64)__builtin_ia32_pminsw((__v4hi)__A, (__v4hi)__B); } __funline __m64 _m_pminsw(__m64 __A, __m64 __B) { return _mm_min_pi16(__A, __B); } __funline __m64 _mm_min_pu8(__m64 __A, __m64 __B) { return (__m64)__builtin_ia32_pminub((__v8qi)__A, (__v8qi)__B); } __funline __m64 _m_pminub(__m64 __A, __m64 __B) { return _mm_min_pu8(__A, __B); } __funline int _mm_movemask_pi8(__m64 __A) { return __builtin_ia32_pmovmskb((__v8qi)__A); } __funline int _m_pmovmskb(__m64 __A) { return _mm_movemask_pi8(__A); } __funline __m64 _mm_mulhi_pu16(__m64 __A, __m64 __B) { return (__m64)__builtin_ia32_pmulhuw((__v4hi)__A, (__v4hi)__B); } __funline __m64 _m_pmulhuw(__m64 __A, __m64 __B) { return _mm_mulhi_pu16(__A, __B); } #ifdef __OPTIMIZE__ __funline __m64 _mm_shuffle_pi16(__m64 __A, int const __N) { return (__m64)__builtin_ia32_pshufw((__v4hi)__A, __N); } __funline __m64 _m_pshufw(__m64 __A, int const __N) { return _mm_shuffle_pi16(__A, __N); } #else #define _mm_shuffle_pi16(A, N) \ ((__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(A), (int)(N))) #define _m_pshufw(A, N) _mm_shuffle_pi16(A, N) #endif __funline void _mm_maskmove_si64(__m64 __A, __m64 __N, char *__P) { __builtin_ia32_maskmovq((__v8qi)__A, (__v8qi)__N, __P); } __funline void _m_maskmovq(__m64 __A, __m64 __N, char *__P) { _mm_maskmove_si64(__A, __N, __P); } __funline __m64 _mm_avg_pu8(__m64 __A, __m64 __B) { return (__m64)__builtin_ia32_pavgb((__v8qi)__A, (__v8qi)__B); } __funline __m64 _m_pavgb(__m64 __A, __m64 __B) { return _mm_avg_pu8(__A, __B); } __funline __m64 _mm_avg_pu16(__m64 __A, __m64 __B) { return (__m64)__builtin_ia32_pavgw((__v4hi)__A, (__v4hi)__B); } __funline __m64 _m_pavgw(__m64 __A, __m64 __B) { return _mm_avg_pu16(__A, __B); } __funline __m64 _mm_sad_pu8(__m64 __A, __m64 __B) { return (__m64)__builtin_ia32_psadbw((__v8qi)__A, (__v8qi)__B); } __funline __m64 _m_psadbw(__m64 __A, __m64 __B) { return _mm_sad_pu8(__A, __B); } __funline void _mm_stream_pi(__m64 *__P, __m64 __A) { __builtin_ia32_movntq((unsigned long long *)__P, (unsigned long long)__A); } __funline void _mm_stream_ps(float *__P, __m128 __A) { __builtin_ia32_movntps(__P, (__v4sf)__A); } __funline void _mm_sfence(void) { __builtin_ia32_sfence(); } #define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \ do { \ __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \ __v4sf __t0 = __builtin_ia32_unpcklps(__r0, __r1); \ __v4sf __t1 = __builtin_ia32_unpcklps(__r2, __r3); \ __v4sf __t2 = __builtin_ia32_unpckhps(__r0, __r1); \ __v4sf __t3 = __builtin_ia32_unpckhps(__r2, __r3); \ (row0) = __builtin_ia32_movlhps(__t0, __t1); \ (row1) = __builtin_ia32_movhlps(__t1, __t0); \ (row2) = __builtin_ia32_movlhps(__t2, __t3); \ (row3) = __builtin_ia32_movhlps(__t3, __t2); \ } while (0) #include "third_party/intel/emmintrin.internal.h" #ifdef __DISABLE_SSE__ #undef __DISABLE_SSE__ #pragma GCC pop_options #endif /* __DISABLE_SSE__ */ __funline void _mm_pause(void) { __builtin_ia32_pause(); } #endif /* __x86_64__ */ #endif /* _XMMINTRIN_H_INCLUDED */
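/* Example (illustrative sketch; enable_ftz is a hypothetical name): using
   the MXCSR helpers defined above to turn on flush-to-zero, so denormal
   results become +0.0 instead of taking the slow microcoded path. */
static inline void enable_ftz(void) {
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
}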
22,311
784
jart/cosmopolitan
false
cosmopolitan/third_party/intel/clwbintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED
#error "Never use <clwbintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef _CLWBINTRIN_H_INCLUDED
#define _CLWBINTRIN_H_INCLUDED
#ifndef __CLWB__
#pragma GCC push_options
#pragma GCC target("clwb")
#define __DISABLE_CLWB__
#endif /* __CLWB__ */
__funline void _mm_clwb(void *__A) {
  __builtin_ia32_clwb(__A);
}
#ifdef __DISABLE_CLWB__
#undef __DISABLE_CLWB__
#pragma GCC pop_options
#endif /* __DISABLE_CLWB__ */
#endif /* _CLWBINTRIN_H_INCLUDED */
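/* Example (illustrative sketch; publish_value is a hypothetical name):
   pushing a just-written value toward memory. CLWB writes the line back
   while (unlike CLFLUSH) allowing it to stay cached; the sfence orders the
   write-back ahead of later stores. */
static inline void publish_value(long *slot, long value) {
  *slot = value;
  _mm_clwb(slot);
  _mm_sfence();
}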
505
24
jart/cosmopolitan
false
cosmopolitan/third_party/intel/mwaitxintrin.internal.h
#ifndef _MWAITXINTRIN_H_INCLUDED
#define _MWAITXINTRIN_H_INCLUDED
#ifdef __x86_64__
#ifndef __MWAITX__
#pragma GCC push_options
#pragma GCC target("mwaitx")
#define __DISABLE_MWAITX__
#endif /* __MWAITX__ */
__funline void _mm_monitorx(void const *__P, unsigned int __E,
                            unsigned int __H) {
  __builtin_ia32_monitorx(__P, __E, __H);
}
__funline void _mm_mwaitx(unsigned int __E, unsigned int __H,
                          unsigned int __C) {
  __builtin_ia32_mwaitx(__E, __H, __C);
}
#ifdef __DISABLE_MWAITX__
#undef __DISABLE_MWAITX__
#pragma GCC pop_options
#endif /* __DISABLE_MWAITX__ */
#endif /* __x86_64__ */
#endif /* _MWAITXINTRIN_H_INCLUDED */
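/* Example (illustrative sketch; nap_on and the cycle count are
   hypothetical, and the timer-enable bit is assumed from AMD's MWAITX
   documentation): a monitored wait with a hardware timeout. Bit 1 of the
   extensions argument arms the timeout counter, so the core wakes after
   roughly 100000 cycles even if no store touches the monitored line. */
static inline void nap_on(int const *addr) {
  _mm_monitorx((void const *)addr, 0, 0);
  if (!*(volatile int const *)addr) _mm_mwaitx(0x2, 0, 100000);
}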
634
26
jart/cosmopolitan
false
cosmopolitan/third_party/intel/avx512vnnivlintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #error \ "Never use <avx512vnnivlintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _AVX512VNNIVLINTRIN_H_INCLUDED #define _AVX512VNNIVLINTRIN_H_INCLUDED #if !defined(__AVX512VL__) || !defined(__AVX512VNNI__) #pragma GCC push_options #pragma GCC target("avx512vnni,avx512vl") #define __DISABLE_AVX512VNNIVL__ #endif /* __AVX512VNNIVL__ */ __funline __m256i _mm256_dpbusd_epi32(__m256i __A, __m256i __B, __m256i __C) { return (__m256i)__builtin_ia32_vpdpbusd_v8si((__v8si)__A, (__v8si)__B, (__v8si)__C); } __funline __m256i _mm256_mask_dpbusd_epi32(__m256i __A, __mmask8 __B, __m256i __C, __m256i __D) { return (__m256i)__builtin_ia32_vpdpbusd_v8si_mask((__v8si)__A, (__v8si)__C, (__v8si)__D, (__mmask8)__B); } __funline __m256i _mm256_maskz_dpbusd_epi32(__mmask8 __A, __m256i __B, __m256i __C, __m256i __D) { return (__m256i)__builtin_ia32_vpdpbusd_v8si_maskz( (__v8si)__B, (__v8si)__C, (__v8si)__D, (__mmask8)__A); } __funline __m128i _mm_dpbusd_epi32(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpdpbusd_v4si((__v4si)__A, (__v4si)__B, (__v4si)__C); } __funline __m128i _mm_mask_dpbusd_epi32(__m128i __A, __mmask8 __B, __m128i __C, __m128i __D) { return (__m128i)__builtin_ia32_vpdpbusd_v4si_mask((__v4si)__A, (__v4si)__C, (__v4si)__D, (__mmask8)__B); } __funline __m128i _mm_maskz_dpbusd_epi32(__mmask8 __A, __m128i __B, __m128i __C, __m128i __D) { return (__m128i)__builtin_ia32_vpdpbusd_v4si_maskz( (__v4si)__B, (__v4si)__C, (__v4si)__D, (__mmask8)__A); } __funline __m256i _mm256_dpbusds_epi32(__m256i __A, __m256i __B, __m256i __C) { return (__m256i)__builtin_ia32_vpdpbusds_v8si((__v8si)__A, (__v8si)__B, (__v8si)__C); } __funline __m256i _mm256_mask_dpbusds_epi32(__m256i __A, __mmask8 __B, __m256i __C, __m256i __D) { return (__m256i)__builtin_ia32_vpdpbusds_v8si_mask( (__v8si)__A, (__v8si)__C, (__v8si)__D, (__mmask8)__B); } __funline __m256i _mm256_maskz_dpbusds_epi32(__mmask8 __A, __m256i __B, __m256i __C, __m256i __D) { return (__m256i)__builtin_ia32_vpdpbusds_v8si_maskz( (__v8si)__B, (__v8si)__C, (__v8si)__D, (__mmask8)__A); } __funline __m128i _mm_dpbusds_epi32(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpdpbusds_v4si((__v4si)__A, (__v4si)__B, (__v4si)__C); } __funline __m128i _mm_mask_dpbusds_epi32(__m128i __A, __mmask8 __B, __m128i __C, __m128i __D) { return (__m128i)__builtin_ia32_vpdpbusds_v4si_mask( (__v4si)__A, (__v4si)__C, (__v4si)__D, (__mmask8)__B); } __funline __m128i _mm_maskz_dpbusds_epi32(__mmask8 __A, __m128i __B, __m128i __C, __m128i __D) { return (__m128i)__builtin_ia32_vpdpbusds_v4si_maskz( (__v4si)__B, (__v4si)__C, (__v4si)__D, (__mmask8)__A); } __funline __m256i _mm256_dpwssd_epi32(__m256i __A, __m256i __B, __m256i __C) { return (__m256i)__builtin_ia32_vpdpwssd_v8si((__v8si)__A, (__v8si)__B, (__v8si)__C); } __funline __m256i _mm256_mask_dpwssd_epi32(__m256i __A, __mmask8 __B, __m256i __C, __m256i __D) { return (__m256i)__builtin_ia32_vpdpwssd_v8si_mask((__v8si)__A, (__v8si)__C, (__v8si)__D, (__mmask8)__B); } __funline __m256i _mm256_maskz_dpwssd_epi32(__mmask8 __A, __m256i __B, __m256i __C, __m256i __D) { return (__m256i)__builtin_ia32_vpdpwssd_v8si_maskz( (__v8si)__B, (__v8si)__C, (__v8si)__D, (__mmask8)__A); } __funline __m128i _mm_dpwssd_epi32(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpdpwssd_v4si((__v4si)__A, (__v4si)__B, (__v4si)__C); } __funline __m128i _mm_mask_dpwssd_epi32(__m128i __A, __mmask8 __B, __m128i __C, __m128i __D) { return 
(__m128i)__builtin_ia32_vpdpwssd_v4si_mask((__v4si)__A, (__v4si)__C, (__v4si)__D, (__mmask8)__B); } __funline __m128i _mm_maskz_dpwssd_epi32(__mmask8 __A, __m128i __B, __m128i __C, __m128i __D) { return (__m128i)__builtin_ia32_vpdpwssd_v4si_maskz( (__v4si)__B, (__v4si)__C, (__v4si)__D, (__mmask8)__A); } __funline __m256i _mm256_dpwssds_epi32(__m256i __A, __m256i __B, __m256i __C) { return (__m256i)__builtin_ia32_vpdpwssds_v8si((__v8si)__A, (__v8si)__B, (__v8si)__C); } __funline __m256i _mm256_mask_dpwssds_epi32(__m256i __A, __mmask8 __B, __m256i __C, __m256i __D) { return (__m256i)__builtin_ia32_vpdpwssds_v8si_mask( (__v8si)__A, (__v8si)__C, (__v8si)__D, (__mmask8)__B); } __funline __m256i _mm256_maskz_dpwssds_epi32(__mmask8 __A, __m256i __B, __m256i __C, __m256i __D) { return (__m256i)__builtin_ia32_vpdpwssds_v8si_maskz( (__v8si)__B, (__v8si)__C, (__v8si)__D, (__mmask8)__A); } __funline __m128i _mm_dpwssds_epi32(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpdpwssds_v4si((__v4si)__A, (__v4si)__B, (__v4si)__C); } __funline __m128i _mm_mask_dpwssds_epi32(__m128i __A, __mmask8 __B, __m128i __C, __m128i __D) { return (__m128i)__builtin_ia32_vpdpwssds_v4si_mask( (__v4si)__A, (__v4si)__C, (__v4si)__D, (__mmask8)__B); } __funline __m128i _mm_maskz_dpwssds_epi32(__mmask8 __A, __m128i __B, __m128i __C, __m128i __D) { return (__m128i)__builtin_ia32_vpdpwssds_v4si_maskz( (__v4si)__B, (__v4si)__C, (__v4si)__D, (__mmask8)__A); } #ifdef __DISABLE_AVX512VNNIVL__ #undef __DISABLE_AVX512VNNIVL__ #pragma GCC pop_options #endif /* __DISABLE_AVX512VNNIVL__ */ #endif /* __DISABLE_AVX512VNNIVL__ */
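/* Example (illustrative sketch; vnni_low_lanes is a hypothetical name):
   the maskz form zeroes every int32 lane whose mask bit is clear, here
   keeping only the low four accumulators of the 256-bit result. Build
   with -mavx512vnni -mavx512vl. */
static inline __m256i vnni_low_lanes(__m256i acc, __m256i u8s, __m256i s8s) {
  return _mm256_maskz_dpbusd_epi32((__mmask8)0x0f, acc, u8s, s8s);
}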
6,559
155
jart/cosmopolitan
false
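Editor's note: the VNNI dot-product intrinsics above fold four unsigned-byte
by signed-byte products (or two word products) into each 32-bit lane of the
accumulator operand. A minimal usage sketch follows, assuming a translation
unit compiled with AVX512VL and AVX512VNNI enabled and n a multiple of 32;
the function name dot_u8i8 is illustrative, not part of the repo.

/* Hedged sketch: sum of a[i]*b[i] over n bytes, a unsigned, b signed,
   using _mm256_dpbusd_epi32 from the header above. */
#include "third_party/intel/immintrin.internal.h"
static int dot_u8i8(const unsigned char *a, const signed char *b, int n) {
  __m256i acc = _mm256_setzero_si256();
  for (int i = 0; i < n; i += 32) {
    __m256i va = _mm256_loadu_si256((const __m256i_u *)(a + i));
    __m256i vb = _mm256_loadu_si256((const __m256i_u *)(b + i));
    acc = _mm256_dpbusd_epi32(acc, va, vb); /* 4 byte products per lane */
  }
  int lane[8]; /* reduce the eight 32-bit lanes */
  _mm256_storeu_si256((__m256i_u *)lane, acc);
  return lane[0] + lane[1] + lane[2] + lane[3] + lane[4] + lane[5] + lane[6] +
         lane[7];
}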
cosmopolitan/third_party/intel/avx512vbmivlintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED
#error \
    "Never use <avx512vbmivlintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef _AVX512VBMIVLINTRIN_H_INCLUDED
#define _AVX512VBMIVLINTRIN_H_INCLUDED
#if !defined(__AVX512VL__) || !defined(__AVX512VBMI__)
#pragma GCC push_options
#pragma GCC target("avx512vbmi,avx512vl")
#define __DISABLE_AVX512VBMIVL__
#endif /* __AVX512VBMIVL__ */
__funline __m256i _mm256_mask_multishift_epi64_epi8(__m256i __W, __mmask32 __M,
                                                    __m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_vpmultishiftqb256_mask(
      (__v32qi)__X, (__v32qi)__Y, (__v32qi)__W, (__mmask32)__M);
}
__funline __m256i _mm256_maskz_multishift_epi64_epi8(__mmask32 __M, __m256i __X,
                                                     __m256i __Y) {
  return (__m256i)__builtin_ia32_vpmultishiftqb256_mask(
      (__v32qi)__X, (__v32qi)__Y, (__v32qi)_mm256_setzero_si256(),
      (__mmask32)__M);
}
__funline __m256i _mm256_multishift_epi64_epi8(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_vpmultishiftqb256_mask(
      (__v32qi)__X, (__v32qi)__Y, (__v32qi)_mm256_undefined_si256(),
      (__mmask32)-1);
}
__funline __m128i _mm_mask_multishift_epi64_epi8(__m128i __W, __mmask16 __M,
                                                 __m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_vpmultishiftqb128_mask(
      (__v16qi)__X, (__v16qi)__Y, (__v16qi)__W, (__mmask16)__M);
}
__funline __m128i _mm_maskz_multishift_epi64_epi8(__mmask16 __M, __m128i __X,
                                                  __m128i __Y) {
  return (__m128i)__builtin_ia32_vpmultishiftqb128_mask(
      (__v16qi)__X, (__v16qi)__Y, (__v16qi)_mm_setzero_si128(), (__mmask16)__M);
}
__funline __m128i _mm_multishift_epi64_epi8(__m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_vpmultishiftqb128_mask(
      (__v16qi)__X, (__v16qi)__Y, (__v16qi)_mm_undefined_si128(),
      (__mmask16)-1);
}
__funline __m256i _mm256_permutexvar_epi8(__m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_permvarqi256_mask(
      (__v32qi)__B, (__v32qi)__A, (__v32qi)_mm256_undefined_si256(),
      (__mmask32)-1);
}
__funline __m256i _mm256_maskz_permutexvar_epi8(__mmask32 __M, __m256i __A,
                                                __m256i __B) {
  return (__m256i)__builtin_ia32_permvarqi256_mask(
      (__v32qi)__B, (__v32qi)__A, (__v32qi)_mm256_setzero_si256(),
      (__mmask32)__M);
}
__funline __m256i _mm256_mask_permutexvar_epi8(__m256i __W, __mmask32 __M,
                                               __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_permvarqi256_mask(
      (__v32qi)__B, (__v32qi)__A, (__v32qi)__W, (__mmask32)__M);
}
__funline __m128i _mm_permutexvar_epi8(__m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_permvarqi128_mask(
      (__v16qi)__B, (__v16qi)__A, (__v16qi)_mm_undefined_si128(),
      (__mmask16)-1);
}
__funline __m128i _mm_maskz_permutexvar_epi8(__mmask16 __M, __m128i __A,
                                             __m128i __B) {
  return (__m128i)__builtin_ia32_permvarqi128_mask(
      (__v16qi)__B, (__v16qi)__A, (__v16qi)_mm_setzero_si128(), (__mmask16)__M);
}
__funline __m128i _mm_mask_permutexvar_epi8(__m128i __W, __mmask16 __M,
                                            __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_permvarqi128_mask(
      (__v16qi)__B, (__v16qi)__A, (__v16qi)__W, (__mmask16)__M);
}
__funline __m256i _mm256_permutex2var_epi8(__m256i __A, __m256i __I,
                                           __m256i __B) {
  return (__m256i)__builtin_ia32_vpermt2varqi256_mask(
      (__v32qi)__I /* idx */, (__v32qi)__A, (__v32qi)__B, (__mmask32)-1);
}
__funline __m256i _mm256_mask_permutex2var_epi8(__m256i __A, __mmask32 __U,
                                                __m256i __I, __m256i __B) {
  return (__m256i)__builtin_ia32_vpermt2varqi256_mask(
      (__v32qi)__I /* idx */, (__v32qi)__A, (__v32qi)__B, (__mmask32)__U);
}
__funline __m256i _mm256_mask2_permutex2var_epi8(__m256i __A, __m256i __I,
                                                 __mmask32 __U, __m256i __B) {
  return (__m256i)__builtin_ia32_vpermi2varqi256_mask(
      (__v32qi)__A, (__v32qi)__I /* idx */, (__v32qi)__B, (__mmask32)__U);
}
__funline __m256i _mm256_maskz_permutex2var_epi8(__mmask32 __U, __m256i __A,
                                                 __m256i __I, __m256i __B) {
  return (__m256i)__builtin_ia32_vpermt2varqi256_maskz(
      (__v32qi)__I /* idx */, (__v32qi)__A, (__v32qi)__B, (__mmask32)__U);
}
__funline __m128i _mm_permutex2var_epi8(__m128i __A, __m128i __I, __m128i __B) {
  return (__m128i)__builtin_ia32_vpermt2varqi128_mask(
      (__v16qi)__I /* idx */, (__v16qi)__A, (__v16qi)__B, (__mmask16)-1);
}
__funline __m128i _mm_mask_permutex2var_epi8(__m128i __A, __mmask16 __U,
                                             __m128i __I, __m128i __B) {
  return (__m128i)__builtin_ia32_vpermt2varqi128_mask(
      (__v16qi)__I /* idx */, (__v16qi)__A, (__v16qi)__B, (__mmask16)__U);
}
__funline __m128i _mm_mask2_permutex2var_epi8(__m128i __A, __m128i __I,
                                              __mmask16 __U, __m128i __B) {
  return (__m128i)__builtin_ia32_vpermi2varqi128_mask(
      (__v16qi)__A, (__v16qi)__I /* idx */, (__v16qi)__B, (__mmask16)__U);
}
__funline __m128i _mm_maskz_permutex2var_epi8(__mmask16 __U, __m128i __A,
                                              __m128i __I, __m128i __B) {
  return (__m128i)__builtin_ia32_vpermt2varqi128_maskz(
      (__v16qi)__I /* idx */, (__v16qi)__A, (__v16qi)__B, (__mmask16)__U);
}
#ifdef __DISABLE_AVX512VBMIVL__
#undef __DISABLE_AVX512VBMIVL__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512VBMIVL__ */
#endif /* _AVX512VBMIVLINTRIN_H_INCLUDED */
6,303
160
jart/cosmopolitan
false
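Editor's note: _mm256_permutexvar_epi8 above selects result byte i from
source byte idx[i], crossing 128-bit lanes, with the index vector as the
first operand. A minimal sketch follows, assuming a build with AVX512VBMI
and AVX512VL enabled; reverse_bytes_256 is an illustrative name.

/* Hedged sketch: reverse the 32 bytes of a vector with a full-width
   byte permute (index vector first, data second). */
#include "third_party/intel/immintrin.internal.h"
static __m256i reverse_bytes_256(__m256i x) {
  const __m256i idx =
      _mm256_setr_epi8(31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18,
                       17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2,
                       1, 0);
  return _mm256_permutexvar_epi8(idx, x); /* out[i] = x[31 - i] */
}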
cosmopolitan/third_party/intel/avxintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #error "Never use <avxintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _AVXINTRIN_H_INCLUDED #define _AVXINTRIN_H_INCLUDED #ifndef __AVX__ #pragma GCC push_options #pragma GCC target("avx") #define __DISABLE_AVX__ #endif /* __AVX__ */ typedef double __v4df __attribute__((__vector_size__(32))); typedef float __v8sf __attribute__((__vector_size__(32))); typedef long long __v4di __attribute__((__vector_size__(32))); typedef unsigned long long __v4du __attribute__((__vector_size__(32))); typedef int __v8si __attribute__((__vector_size__(32))); typedef unsigned int __v8su __attribute__((__vector_size__(32))); typedef short __v16hi __attribute__((__vector_size__(32))); typedef unsigned short __v16hu __attribute__((__vector_size__(32))); typedef char __v32qi __attribute__((__vector_size__(32))); typedef unsigned char __v32qu __attribute__((__vector_size__(32))); typedef float __m256 __attribute__((__vector_size__(32), __may_alias__)); typedef long long __m256i __attribute__((__vector_size__(32), __may_alias__)); typedef double __m256d __attribute__((__vector_size__(32), __may_alias__)); typedef float __m256_u __attribute__((__vector_size__(32), __may_alias__, __aligned__(1))); typedef long long __m256i_u __attribute__((__vector_size__(32), __may_alias__, __aligned__(1))); typedef double __m256d_u __attribute__((__vector_size__(32), __may_alias__, __aligned__(1))); #define _CMP_EQ_OQ 0x00 #define _CMP_LT_OS 0x01 #define _CMP_LE_OS 0x02 #define _CMP_UNORD_Q 0x03 #define _CMP_NEQ_UQ 0x04 #define _CMP_NLT_US 0x05 #define _CMP_NLE_US 0x06 #define _CMP_ORD_Q 0x07 #define _CMP_EQ_UQ 0x08 #define _CMP_NGE_US 0x09 #define _CMP_NGT_US 0x0a #define _CMP_FALSE_OQ 0x0b #define _CMP_NEQ_OQ 0x0c #define _CMP_GE_OS 0x0d #define _CMP_GT_OS 0x0e #define _CMP_TRUE_UQ 0x0f #define _CMP_EQ_OS 0x10 #define _CMP_LT_OQ 0x11 #define _CMP_LE_OQ 0x12 #define _CMP_UNORD_S 0x13 #define _CMP_NEQ_US 0x14 #define _CMP_NLT_UQ 0x15 #define _CMP_NLE_UQ 0x16 #define _CMP_ORD_S 0x17 #define _CMP_EQ_US 0x18 #define _CMP_NGE_UQ 0x19 #define _CMP_NGT_UQ 0x1a #define _CMP_FALSE_OS 0x1b #define _CMP_NEQ_OS 0x1c #define _CMP_GE_OQ 0x1d #define _CMP_GT_OQ 0x1e #define _CMP_TRUE_US 0x1f __funline __m256d _mm256_add_pd(__m256d __A, __m256d __B) { return (__m256d)((__v4df)__A + (__v4df)__B); } __funline __m256 _mm256_add_ps(__m256 __A, __m256 __B) { return (__m256)((__v8sf)__A + (__v8sf)__B); } __funline __m256d _mm256_addsub_pd(__m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_addsubpd256((__v4df)__A, (__v4df)__B); } __funline __m256 _mm256_addsub_ps(__m256 __A, __m256 __B) { return (__m256)__builtin_ia32_addsubps256((__v8sf)__A, (__v8sf)__B); } __funline __m256d _mm256_and_pd(__m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_andpd256((__v4df)__A, (__v4df)__B); } __funline __m256 _mm256_and_ps(__m256 __A, __m256 __B) { return (__m256)__builtin_ia32_andps256((__v8sf)__A, (__v8sf)__B); } __funline __m256d _mm256_andnot_pd(__m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_andnpd256((__v4df)__A, (__v4df)__B); } __funline __m256 _mm256_andnot_ps(__m256 __A, __m256 __B) { return (__m256)__builtin_ia32_andnps256((__v8sf)__A, (__v8sf)__B); } #ifdef __OPTIMIZE__ __funline __m256d _mm256_blend_pd(__m256d __X, __m256d __Y, const int __M) { return (__m256d)__builtin_ia32_blendpd256((__v4df)__X, (__v4df)__Y, __M); } __funline __m256 _mm256_blend_ps(__m256 __X, __m256 __Y, const int __M) { return (__m256)__builtin_ia32_blendps256((__v8sf)__X, (__v8sf)__Y, __M); } #else #define 
_mm256_blend_pd(X, Y, M) \ ((__m256d)__builtin_ia32_blendpd256((__v4df)(__m256d)(X), \ (__v4df)(__m256d)(Y), (int)(M))) #define _mm256_blend_ps(X, Y, M) \ ((__m256)__builtin_ia32_blendps256((__v8sf)(__m256)(X), (__v8sf)(__m256)(Y), \ (int)(M))) #endif __funline __m256d _mm256_blendv_pd(__m256d __X, __m256d __Y, __m256d __M) { return (__m256d)__builtin_ia32_blendvpd256((__v4df)__X, (__v4df)__Y, (__v4df)__M); } __funline __m256 _mm256_blendv_ps(__m256 __X, __m256 __Y, __m256 __M) { return (__m256)__builtin_ia32_blendvps256((__v8sf)__X, (__v8sf)__Y, (__v8sf)__M); } __funline __m256d _mm256_div_pd(__m256d __A, __m256d __B) { return (__m256d)((__v4df)__A / (__v4df)__B); } __funline __m256 _mm256_div_ps(__m256 __A, __m256 __B) { return (__m256)((__v8sf)__A / (__v8sf)__B); } #ifdef __OPTIMIZE__ __funline __m256 _mm256_dp_ps(__m256 __X, __m256 __Y, const int __M) { return (__m256)__builtin_ia32_dpps256((__v8sf)__X, (__v8sf)__Y, __M); } #else #define _mm256_dp_ps(X, Y, M) \ ((__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(X), (__v8sf)(__m256)(Y), \ (int)(M))) #endif __funline __m256d _mm256_hadd_pd(__m256d __X, __m256d __Y) { return (__m256d)__builtin_ia32_haddpd256((__v4df)__X, (__v4df)__Y); } __funline __m256 _mm256_hadd_ps(__m256 __X, __m256 __Y) { return (__m256)__builtin_ia32_haddps256((__v8sf)__X, (__v8sf)__Y); } __funline __m256d _mm256_hsub_pd(__m256d __X, __m256d __Y) { return (__m256d)__builtin_ia32_hsubpd256((__v4df)__X, (__v4df)__Y); } __funline __m256 _mm256_hsub_ps(__m256 __X, __m256 __Y) { return (__m256)__builtin_ia32_hsubps256((__v8sf)__X, (__v8sf)__Y); } __funline __m256d _mm256_max_pd(__m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_maxpd256((__v4df)__A, (__v4df)__B); } __funline __m256 _mm256_max_ps(__m256 __A, __m256 __B) { return (__m256)__builtin_ia32_maxps256((__v8sf)__A, (__v8sf)__B); } __funline __m256d _mm256_min_pd(__m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_minpd256((__v4df)__A, (__v4df)__B); } __funline __m256 _mm256_min_ps(__m256 __A, __m256 __B) { return (__m256)__builtin_ia32_minps256((__v8sf)__A, (__v8sf)__B); } __funline __m256d _mm256_mul_pd(__m256d __A, __m256d __B) { return (__m256d)((__v4df)__A * (__v4df)__B); } __funline __m256 _mm256_mul_ps(__m256 __A, __m256 __B) { return (__m256)((__v8sf)__A * (__v8sf)__B); } __funline __m256d _mm256_or_pd(__m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_orpd256((__v4df)__A, (__v4df)__B); } __funline __m256 _mm256_or_ps(__m256 __A, __m256 __B) { return (__m256)__builtin_ia32_orps256((__v8sf)__A, (__v8sf)__B); } #ifdef __OPTIMIZE__ __funline __m256d _mm256_shuffle_pd(__m256d __A, __m256d __B, const int __mask) { return (__m256d)__builtin_ia32_shufpd256((__v4df)__A, (__v4df)__B, __mask); } __funline __m256 _mm256_shuffle_ps(__m256 __A, __m256 __B, const int __mask) { return (__m256)__builtin_ia32_shufps256((__v8sf)__A, (__v8sf)__B, __mask); } #else #define _mm256_shuffle_pd(A, B, N) \ ((__m256d)__builtin_ia32_shufpd256((__v4df)(__m256d)(A), \ (__v4df)(__m256d)(B), (int)(N))) #define _mm256_shuffle_ps(A, B, N) \ ((__m256)__builtin_ia32_shufps256((__v8sf)(__m256)(A), (__v8sf)(__m256)(B), \ (int)(N))) #endif __funline __m256d _mm256_sub_pd(__m256d __A, __m256d __B) { return (__m256d)((__v4df)__A - (__v4df)__B); } __funline __m256 _mm256_sub_ps(__m256 __A, __m256 __B) { return (__m256)((__v8sf)__A - (__v8sf)__B); } __funline __m256d _mm256_xor_pd(__m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_xorpd256((__v4df)__A, (__v4df)__B); } __funline __m256 _mm256_xor_ps(__m256 __A, __m256 
__B) { return (__m256)__builtin_ia32_xorps256((__v8sf)__A, (__v8sf)__B); } #ifdef __OPTIMIZE__ __funline __m128d _mm_cmp_pd(__m128d __X, __m128d __Y, const int __P) { return (__m128d)__builtin_ia32_cmppd((__v2df)__X, (__v2df)__Y, __P); } __funline __m128 _mm_cmp_ps(__m128 __X, __m128 __Y, const int __P) { return (__m128)__builtin_ia32_cmpps((__v4sf)__X, (__v4sf)__Y, __P); } __funline __m256d _mm256_cmp_pd(__m256d __X, __m256d __Y, const int __P) { return (__m256d)__builtin_ia32_cmppd256((__v4df)__X, (__v4df)__Y, __P); } __funline __m256 _mm256_cmp_ps(__m256 __X, __m256 __Y, const int __P) { return (__m256)__builtin_ia32_cmpps256((__v8sf)__X, (__v8sf)__Y, __P); } __funline __m128d _mm_cmp_sd(__m128d __X, __m128d __Y, const int __P) { return (__m128d)__builtin_ia32_cmpsd((__v2df)__X, (__v2df)__Y, __P); } __funline __m128 _mm_cmp_ss(__m128 __X, __m128 __Y, const int __P) { return (__m128)__builtin_ia32_cmpss((__v4sf)__X, (__v4sf)__Y, __P); } #else #define _mm_cmp_pd(X, Y, P) \ ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \ (int)(P))) #define _mm_cmp_ps(X, Y, P) \ ((__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \ (int)(P))) #define _mm256_cmp_pd(X, Y, P) \ ((__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(X), \ (__v4df)(__m256d)(Y), (int)(P))) #define _mm256_cmp_ps(X, Y, P) \ ((__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(X), (__v8sf)(__m256)(Y), \ (int)(P))) #define _mm_cmp_sd(X, Y, P) \ ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \ (int)(P))) #define _mm_cmp_ss(X, Y, P) \ ((__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \ (int)(P))) #endif __funline __m256d _mm256_cvtepi32_pd(__m128i __A) { return (__m256d)__builtin_ia32_cvtdq2pd256((__v4si)__A); } __funline __m256 _mm256_cvtepi32_ps(__m256i __A) { return (__m256)__builtin_ia32_cvtdq2ps256((__v8si)__A); } __funline __m128 _mm256_cvtpd_ps(__m256d __A) { return (__m128)__builtin_ia32_cvtpd2ps256((__v4df)__A); } __funline __m256i _mm256_cvtps_epi32(__m256 __A) { return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf)__A); } __funline __m256d _mm256_cvtps_pd(__m128 __A) { return (__m256d)__builtin_ia32_cvtps2pd256((__v4sf)__A); } __funline __m128i _mm256_cvttpd_epi32(__m256d __A) { return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df)__A); } __funline __m128i _mm256_cvtpd_epi32(__m256d __A) { return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df)__A); } __funline __m256i _mm256_cvttps_epi32(__m256 __A) { return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf)__A); } __funline double _mm256_cvtsd_f64(__m256d __A) { return __A[0]; } __funline float _mm256_cvtss_f32(__m256 __A) { return __A[0]; } #ifdef __OPTIMIZE__ __funline __m128d _mm256_extractf128_pd(__m256d __X, const int __N) { return (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)__X, __N); } __funline __m128 _mm256_extractf128_ps(__m256 __X, const int __N) { return (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)__X, __N); } __funline __m128i _mm256_extractf128_si256(__m256i __X, const int __N) { return (__m128i)__builtin_ia32_vextractf128_si256((__v8si)__X, __N); } __funline int _mm256_extract_epi32(__m256i __X, int const __N) { __m128i __Y = _mm256_extractf128_si256(__X, __N >> 2); return _mm_extract_epi32(__Y, __N % 4); } __funline int _mm256_extract_epi16(__m256i __X, int const __N) { __m128i __Y = _mm256_extractf128_si256(__X, __N >> 3); return _mm_extract_epi16(__Y, __N % 8); } __funline int _mm256_extract_epi8(__m256i __X, int const __N) { __m128i __Y = 
_mm256_extractf128_si256(__X, __N >> 4); return _mm_extract_epi8(__Y, __N % 16); } #ifdef __x86_64__ __funline long long _mm256_extract_epi64(__m256i __X, const int __N) { __m128i __Y = _mm256_extractf128_si256(__X, __N >> 1); return _mm_extract_epi64(__Y, __N % 2); } #endif #else #define _mm256_extractf128_pd(X, N) \ ((__m128d)__builtin_ia32_vextractf128_pd256((__v4df)(__m256d)(X), (int)(N))) #define _mm256_extractf128_ps(X, N) \ ((__m128)__builtin_ia32_vextractf128_ps256((__v8sf)(__m256)(X), (int)(N))) #define _mm256_extractf128_si256(X, N) \ ((__m128i)__builtin_ia32_vextractf128_si256((__v8si)(__m256i)(X), (int)(N))) #define _mm256_extract_epi32(X, N) \ (__extension__({ \ __m128i __Y = _mm256_extractf128_si256((X), (N) >> 2); \ _mm_extract_epi32(__Y, (N) % 4); \ })) #define _mm256_extract_epi16(X, N) \ (__extension__({ \ __m128i __Y = _mm256_extractf128_si256((X), (N) >> 3); \ _mm_extract_epi16(__Y, (N) % 8); \ })) #define _mm256_extract_epi8(X, N) \ (__extension__({ \ __m128i __Y = _mm256_extractf128_si256((X), (N) >> 4); \ _mm_extract_epi8(__Y, (N) % 16); \ })) #ifdef __x86_64__ #define _mm256_extract_epi64(X, N) \ (__extension__({ \ __m128i __Y = _mm256_extractf128_si256((X), (N) >> 1); \ _mm_extract_epi64(__Y, (N) % 2); \ })) #endif #endif __funline void _mm256_zeroall(void) { __builtin_ia32_vzeroall(); } __funline void _mm256_zeroupper(void) { __builtin_ia32_vzeroupper(); } __funline __m128d _mm_permutevar_pd(__m128d __A, __m128i __C) { return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__A, (__v2di)__C); } __funline __m256d _mm256_permutevar_pd(__m256d __A, __m256i __C) { return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__A, (__v4di)__C); } __funline __m128 _mm_permutevar_ps(__m128 __A, __m128i __C) { return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__A, (__v4si)__C); } __funline __m256 _mm256_permutevar_ps(__m256 __A, __m256i __C) { return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__A, (__v8si)__C); } #ifdef __OPTIMIZE__ __funline __m128d _mm_permute_pd(__m128d __X, const int __C) { return (__m128d)__builtin_ia32_vpermilpd((__v2df)__X, __C); } __funline __m256d _mm256_permute_pd(__m256d __X, const int __C) { return (__m256d)__builtin_ia32_vpermilpd256((__v4df)__X, __C); } __funline __m128 _mm_permute_ps(__m128 __X, const int __C) { return (__m128)__builtin_ia32_vpermilps((__v4sf)__X, __C); } __funline __m256 _mm256_permute_ps(__m256 __X, const int __C) { return (__m256)__builtin_ia32_vpermilps256((__v8sf)__X, __C); } #else #define _mm_permute_pd(X, C) \ ((__m128d)__builtin_ia32_vpermilpd((__v2df)(__m128d)(X), (int)(C))) #define _mm256_permute_pd(X, C) \ ((__m256d)__builtin_ia32_vpermilpd256((__v4df)(__m256d)(X), (int)(C))) #define _mm_permute_ps(X, C) \ ((__m128)__builtin_ia32_vpermilps((__v4sf)(__m128)(X), (int)(C))) #define _mm256_permute_ps(X, C) \ ((__m256)__builtin_ia32_vpermilps256((__v8sf)(__m256)(X), (int)(C))) #endif #ifdef __OPTIMIZE__ __funline __m256d _mm256_permute2f128_pd(__m256d __X, __m256d __Y, const int __C) { return (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)__X, (__v4df)__Y, __C); } __funline __m256 _mm256_permute2f128_ps(__m256 __X, __m256 __Y, const int __C) { return (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)__X, (__v8sf)__Y, __C); } __funline __m256i _mm256_permute2f128_si256(__m256i __X, __m256i __Y, const int __C) { return (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)__X, (__v8si)__Y, __C); } #else #define _mm256_permute2f128_pd(X, Y, C) \ ((__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(X), \ (__v4df)(__m256d)(Y), 
(int)(C))) #define _mm256_permute2f128_ps(X, Y, C) \ ((__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(X), \ (__v8sf)(__m256)(Y), (int)(C))) #define _mm256_permute2f128_si256(X, Y, C) \ ((__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(X), \ (__v8si)(__m256i)(Y), (int)(C))) #endif __funline __m128 _mm_broadcast_ss(float const *__X) { return (__m128)__builtin_ia32_vbroadcastss(__X); } __funline __m256d _mm256_broadcast_sd(double const *__X) { return (__m256d)__builtin_ia32_vbroadcastsd256(__X); } __funline __m256 _mm256_broadcast_ss(float const *__X) { return (__m256)__builtin_ia32_vbroadcastss256(__X); } __funline __m256d _mm256_broadcast_pd(__m128d const *__X) { return (__m256d)__builtin_ia32_vbroadcastf128_pd256(__X); } __funline __m256 _mm256_broadcast_ps(__m128 const *__X) { return (__m256)__builtin_ia32_vbroadcastf128_ps256(__X); } #ifdef __OPTIMIZE__ __funline __m256d _mm256_insertf128_pd(__m256d __X, __m128d __Y, const int __O) { return (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)__X, (__v2df)__Y, __O); } __funline __m256 _mm256_insertf128_ps(__m256 __X, __m128 __Y, const int __O) { return (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)__X, (__v4sf)__Y, __O); } __funline __m256i _mm256_insertf128_si256(__m256i __X, __m128i __Y, const int __O) { return (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)__X, (__v4si)__Y, __O); } __funline __m256i _mm256_insert_epi32(__m256i __X, int __D, int const __N) { __m128i __Y = _mm256_extractf128_si256(__X, __N >> 2); __Y = _mm_insert_epi32(__Y, __D, __N % 4); return _mm256_insertf128_si256(__X, __Y, __N >> 2); } __funline __m256i _mm256_insert_epi16(__m256i __X, int __D, int const __N) { __m128i __Y = _mm256_extractf128_si256(__X, __N >> 3); __Y = _mm_insert_epi16(__Y, __D, __N % 8); return _mm256_insertf128_si256(__X, __Y, __N >> 3); } __funline __m256i _mm256_insert_epi8(__m256i __X, int __D, int const __N) { __m128i __Y = _mm256_extractf128_si256(__X, __N >> 4); __Y = _mm_insert_epi8(__Y, __D, __N % 16); return _mm256_insertf128_si256(__X, __Y, __N >> 4); } #ifdef __x86_64__ __funline __m256i _mm256_insert_epi64(__m256i __X, long long __D, int const __N) { __m128i __Y = _mm256_extractf128_si256(__X, __N >> 1); __Y = _mm_insert_epi64(__Y, __D, __N % 2); return _mm256_insertf128_si256(__X, __Y, __N >> 1); } #endif #else #define _mm256_insertf128_pd(X, Y, O) \ ((__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)(__m256d)(X), \ (__v2df)(__m128d)(Y), (int)(O))) #define _mm256_insertf128_ps(X, Y, O) \ ((__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)(__m256)(X), \ (__v4sf)(__m128)(Y), (int)(O))) #define _mm256_insertf128_si256(X, Y, O) \ ((__m256i)__builtin_ia32_vinsertf128_si256((__v8si)(__m256i)(X), \ (__v4si)(__m128i)(Y), (int)(O))) #define _mm256_insert_epi32(X, D, N) \ (__extension__({ \ __m128i __Y = _mm256_extractf128_si256((X), (N) >> 2); \ __Y = _mm_insert_epi32(__Y, (D), (N) % 4); \ _mm256_insertf128_si256((X), __Y, (N) >> 2); \ })) #define _mm256_insert_epi16(X, D, N) \ (__extension__({ \ __m128i __Y = _mm256_extractf128_si256((X), (N) >> 3); \ __Y = _mm_insert_epi16(__Y, (D), (N) % 8); \ _mm256_insertf128_si256((X), __Y, (N) >> 3); \ })) #define _mm256_insert_epi8(X, D, N) \ (__extension__({ \ __m128i __Y = _mm256_extractf128_si256((X), (N) >> 4); \ __Y = _mm_insert_epi8(__Y, (D), (N) % 16); \ _mm256_insertf128_si256((X), __Y, (N) >> 4); \ })) #ifdef __x86_64__ #define _mm256_insert_epi64(X, D, N) \ (__extension__({ \ __m128i __Y = _mm256_extractf128_si256((X), (N) >> 1); \ __Y = _mm_insert_epi64(__Y, (D), (N) 
% 2); \ _mm256_insertf128_si256((X), __Y, (N) >> 1); \ })) #endif #endif __funline __m256d _mm256_load_pd(double const *__P) { return *(__m256d *)__P; } __funline void _mm256_store_pd(double *__P, __m256d __A) { *(__m256d *)__P = __A; } __funline __m256 _mm256_load_ps(float const *__P) { return *(__m256 *)__P; } __funline void _mm256_store_ps(float *__P, __m256 __A) { *(__m256 *)__P = __A; } __funline __m256d _mm256_loadu_pd(double const *__P) { return *(__m256d_u *)__P; } __funline void _mm256_storeu_pd(double *__P, __m256d __A) { *(__m256d_u *)__P = __A; } __funline __m256 _mm256_loadu_ps(float const *__P) { return *(__m256_u *)__P; } __funline void _mm256_storeu_ps(float *__P, __m256 __A) { *(__m256_u *)__P = __A; } __funline __m256i _mm256_load_si256(__m256i const *__P) { return *__P; } __funline void _mm256_store_si256(__m256i *__P, __m256i __A) { *__P = __A; } __funline __m256i _mm256_loadu_si256(__m256i_u const *__P) { return *__P; } __funline void _mm256_storeu_si256(__m256i_u *__P, __m256i __A) { *__P = __A; } __funline __m128d _mm_maskload_pd(double const *__P, __m128i __M) { return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__P, (__v2di)__M); } __funline void _mm_maskstore_pd(double *__P, __m128i __M, __m128d __A) { __builtin_ia32_maskstorepd((__v2df *)__P, (__v2di)__M, (__v2df)__A); } __funline __m256d _mm256_maskload_pd(double const *__P, __m256i __M) { return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__P, (__v4di)__M); } __funline void _mm256_maskstore_pd(double *__P, __m256i __M, __m256d __A) { __builtin_ia32_maskstorepd256((__v4df *)__P, (__v4di)__M, (__v4df)__A); } __funline __m128 _mm_maskload_ps(float const *__P, __m128i __M) { return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__P, (__v4si)__M); } __funline void _mm_maskstore_ps(float *__P, __m128i __M, __m128 __A) { __builtin_ia32_maskstoreps((__v4sf *)__P, (__v4si)__M, (__v4sf)__A); } __funline __m256 _mm256_maskload_ps(float const *__P, __m256i __M) { return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__P, (__v8si)__M); } __funline void _mm256_maskstore_ps(float *__P, __m256i __M, __m256 __A) { __builtin_ia32_maskstoreps256((__v8sf *)__P, (__v8si)__M, (__v8sf)__A); } __funline __m256 _mm256_movehdup_ps(__m256 __X) { return (__m256)__builtin_ia32_movshdup256((__v8sf)__X); } __funline __m256 _mm256_moveldup_ps(__m256 __X) { return (__m256)__builtin_ia32_movsldup256((__v8sf)__X); } __funline __m256d _mm256_movedup_pd(__m256d __X) { return (__m256d)__builtin_ia32_movddup256((__v4df)__X); } __funline __m256i _mm256_lddqu_si256(__m256i const *__P) { return (__m256i)__builtin_ia32_lddqu256((char const *)__P); } __funline void _mm256_stream_si256(__m256i *__A, __m256i __B) { __builtin_ia32_movntdq256((__v4di *)__A, (__v4di)__B); } __funline void _mm256_stream_pd(double *__A, __m256d __B) { __builtin_ia32_movntpd256(__A, (__v4df)__B); } __funline void _mm256_stream_ps(float *__P, __m256 __A) { __builtin_ia32_movntps256(__P, (__v8sf)__A); } __funline __m256 _mm256_rcp_ps(__m256 __A) { return (__m256)__builtin_ia32_rcpps256((__v8sf)__A); } __funline __m256 _mm256_rsqrt_ps(__m256 __A) { return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__A); } __funline __m256d _mm256_sqrt_pd(__m256d __A) { return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__A); } __funline __m256 _mm256_sqrt_ps(__m256 __A) { return (__m256)__builtin_ia32_sqrtps256((__v8sf)__A); } #ifdef __OPTIMIZE__ __funline __m256d _mm256_round_pd(__m256d __V, const int __M) { return (__m256d)__builtin_ia32_roundpd256((__v4df)__V, __M); } 
__funline __m256 _mm256_round_ps(__m256 __V, const int __M) { return (__m256)__builtin_ia32_roundps256((__v8sf)__V, __M); } #else #define _mm256_round_pd(V, M) \ ((__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (int)(M))) #define _mm256_round_ps(V, M) \ ((__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (int)(M))) #endif #define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL) #define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR) #define _mm256_ceil_ps(V) _mm256_round_ps((V), _MM_FROUND_CEIL) #define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR) __funline __m256d _mm256_unpackhi_pd(__m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_unpckhpd256((__v4df)__A, (__v4df)__B); } __funline __m256d _mm256_unpacklo_pd(__m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_unpcklpd256((__v4df)__A, (__v4df)__B); } __funline __m256 _mm256_unpackhi_ps(__m256 __A, __m256 __B) { return (__m256)__builtin_ia32_unpckhps256((__v8sf)__A, (__v8sf)__B); } __funline __m256 _mm256_unpacklo_ps(__m256 __A, __m256 __B) { return (__m256)__builtin_ia32_unpcklps256((__v8sf)__A, (__v8sf)__B); } __funline int _mm_testz_pd(__m128d __M, __m128d __V) { return __builtin_ia32_vtestzpd((__v2df)__M, (__v2df)__V); } __funline int _mm_testc_pd(__m128d __M, __m128d __V) { return __builtin_ia32_vtestcpd((__v2df)__M, (__v2df)__V); } __funline int _mm_testnzc_pd(__m128d __M, __m128d __V) { return __builtin_ia32_vtestnzcpd((__v2df)__M, (__v2df)__V); } __funline int _mm_testz_ps(__m128 __M, __m128 __V) { return __builtin_ia32_vtestzps((__v4sf)__M, (__v4sf)__V); } __funline int _mm_testc_ps(__m128 __M, __m128 __V) { return __builtin_ia32_vtestcps((__v4sf)__M, (__v4sf)__V); } __funline int _mm_testnzc_ps(__m128 __M, __m128 __V) { return __builtin_ia32_vtestnzcps((__v4sf)__M, (__v4sf)__V); } __funline int _mm256_testz_pd(__m256d __M, __m256d __V) { return __builtin_ia32_vtestzpd256((__v4df)__M, (__v4df)__V); } __funline int _mm256_testc_pd(__m256d __M, __m256d __V) { return __builtin_ia32_vtestcpd256((__v4df)__M, (__v4df)__V); } __funline int _mm256_testnzc_pd(__m256d __M, __m256d __V) { return __builtin_ia32_vtestnzcpd256((__v4df)__M, (__v4df)__V); } __funline int _mm256_testz_ps(__m256 __M, __m256 __V) { return __builtin_ia32_vtestzps256((__v8sf)__M, (__v8sf)__V); } __funline int _mm256_testc_ps(__m256 __M, __m256 __V) { return __builtin_ia32_vtestcps256((__v8sf)__M, (__v8sf)__V); } __funline int _mm256_testnzc_ps(__m256 __M, __m256 __V) { return __builtin_ia32_vtestnzcps256((__v8sf)__M, (__v8sf)__V); } __funline int _mm256_testz_si256(__m256i __M, __m256i __V) { return __builtin_ia32_ptestz256((__v4di)__M, (__v4di)__V); } __funline int _mm256_testc_si256(__m256i __M, __m256i __V) { return __builtin_ia32_ptestc256((__v4di)__M, (__v4di)__V); } __funline int _mm256_testnzc_si256(__m256i __M, __m256i __V) { return __builtin_ia32_ptestnzc256((__v4di)__M, (__v4di)__V); } __funline int _mm256_movemask_pd(__m256d __A) { return __builtin_ia32_movmskpd256((__v4df)__A); } __funline int _mm256_movemask_ps(__m256 __A) { return __builtin_ia32_movmskps256((__v8sf)__A); } __funline __m256d _mm256_undefined_pd(void) { __m256d __Y = __Y; return __Y; } __funline __m256 _mm256_undefined_ps(void) { __m256 __Y = __Y; return __Y; } __funline __m256i _mm256_undefined_si256(void) { __m256i __Y = __Y; return __Y; } __funline __m256d _mm256_setzero_pd(void) { return __extension__(__m256d){0.0, 0.0, 0.0, 0.0}; } __funline __m256 _mm256_setzero_ps(void) { return __extension__(__m256){0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0}; } __funline __m256i _mm256_setzero_si256(void) { return __extension__(__m256i)(__v4di){0, 0, 0, 0}; } __funline __m256d _mm256_set_pd(double __A, double __B, double __C, double __D) { return __extension__(__m256d){__D, __C, __B, __A}; } __funline __m256 _mm256_set_ps(float __A, float __B, float __C, float __D, float __E, float __F, float __G, float __H) { return __extension__(__m256){__H, __G, __F, __E, __D, __C, __B, __A}; } __funline __m256i _mm256_set_epi32(int __A, int __B, int __C, int __D, int __E, int __F, int __G, int __H) { return __extension__(__m256i)(__v8si){__H, __G, __F, __E, __D, __C, __B, __A}; } __funline __m256i _mm256_set_epi16(short __q15, short __q14, short __q13, short __q12, short __q11, short __q10, short __q09, short __q08, short __q07, short __q06, short __q05, short __q04, short __q03, short __q02, short __q01, short __q00) { return __extension__(__m256i)(__v16hi){ __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15}; } __funline __m256i _mm256_set_epi8(char __q31, char __q30, char __q29, char __q28, char __q27, char __q26, char __q25, char __q24, char __q23, char __q22, char __q21, char __q20, char __q19, char __q18, char __q17, char __q16, char __q15, char __q14, char __q13, char __q12, char __q11, char __q10, char __q09, char __q08, char __q07, char __q06, char __q05, char __q04, char __q03, char __q02, char __q01, char __q00) { return __extension__(__m256i)(__v32qi){ __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15, __q16, __q17, __q18, __q19, __q20, __q21, __q22, __q23, __q24, __q25, __q26, __q27, __q28, __q29, __q30, __q31}; } __funline __m256i _mm256_set_epi64x(long long __A, long long __B, long long __C, long long __D) { return __extension__(__m256i)(__v4di){__D, __C, __B, __A}; } __funline __m256d _mm256_set1_pd(double __A) { return __extension__(__m256d){__A, __A, __A, __A}; } __funline __m256 _mm256_set1_ps(float __A) { return __extension__(__m256){__A, __A, __A, __A, __A, __A, __A, __A}; } __funline __m256i _mm256_set1_epi32(int __A) { return __extension__(__m256i)(__v8si){__A, __A, __A, __A, __A, __A, __A, __A}; } __funline __m256i _mm256_set1_epi16(short __A) { return _mm256_set_epi16(__A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A); } __funline __m256i _mm256_set1_epi8(char __A) { return _mm256_set_epi8(__A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A); } __funline __m256i _mm256_set1_epi64x(long long __A) { return __extension__(__m256i)(__v4di){__A, __A, __A, __A}; } __funline __m256d _mm256_setr_pd(double __A, double __B, double __C, double __D) { return _mm256_set_pd(__D, __C, __B, __A); } __funline __m256 _mm256_setr_ps(float __A, float __B, float __C, float __D, float __E, float __F, float __G, float __H) { return _mm256_set_ps(__H, __G, __F, __E, __D, __C, __B, __A); } __funline __m256i _mm256_setr_epi32(int __A, int __B, int __C, int __D, int __E, int __F, int __G, int __H) { return _mm256_set_epi32(__H, __G, __F, __E, __D, __C, __B, __A); } __funline __m256i _mm256_setr_epi16(short __q15, short __q14, short __q13, short __q12, short __q11, short __q10, short __q09, short __q08, short __q07, short __q06, short __q05, short __q04, short __q03, short __q02, short __q01, short __q00) { return _mm256_set_epi16(__q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, __q08, 
__q09, __q10, __q11, __q12, __q13, __q14, __q15); } __funline __m256i _mm256_setr_epi8(char __q31, char __q30, char __q29, char __q28, char __q27, char __q26, char __q25, char __q24, char __q23, char __q22, char __q21, char __q20, char __q19, char __q18, char __q17, char __q16, char __q15, char __q14, char __q13, char __q12, char __q11, char __q10, char __q09, char __q08, char __q07, char __q06, char __q05, char __q04, char __q03, char __q02, char __q01, char __q00) { return _mm256_set_epi8(__q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15, __q16, __q17, __q18, __q19, __q20, __q21, __q22, __q23, __q24, __q25, __q26, __q27, __q28, __q29, __q30, __q31); } __funline __m256i _mm256_setr_epi64x(long long __A, long long __B, long long __C, long long __D) { return _mm256_set_epi64x(__D, __C, __B, __A); } __funline __m256 _mm256_castpd_ps(__m256d __A) { return (__m256)__A; } __funline __m256i _mm256_castpd_si256(__m256d __A) { return (__m256i)__A; } __funline __m256d _mm256_castps_pd(__m256 __A) { return (__m256d)__A; } __funline __m256i _mm256_castps_si256(__m256 __A) { return (__m256i)__A; } __funline __m256 _mm256_castsi256_ps(__m256i __A) { return (__m256)__A; } __funline __m256d _mm256_castsi256_pd(__m256i __A) { return (__m256d)__A; } __funline __m128d _mm256_castpd256_pd128(__m256d __A) { return (__m128d)__builtin_ia32_pd_pd256((__v4df)__A); } __funline __m128 _mm256_castps256_ps128(__m256 __A) { return (__m128)__builtin_ia32_ps_ps256((__v8sf)__A); } __funline __m128i _mm256_castsi256_si128(__m256i __A) { return (__m128i)__builtin_ia32_si_si256((__v8si)__A); } __funline __m256d _mm256_castpd128_pd256(__m128d __A) { return (__m256d)__builtin_ia32_pd256_pd((__v2df)__A); } __funline __m256 _mm256_castps128_ps256(__m128 __A) { return (__m256)__builtin_ia32_ps256_ps((__v4sf)__A); } __funline __m256i _mm256_castsi128_si256(__m128i __A) { return (__m256i)__builtin_ia32_si256_si((__v4si)__A); } __funline __m256 _mm256_set_m128(__m128 __H, __m128 __L) { return _mm256_insertf128_ps(_mm256_castps128_ps256(__L), __H, 1); } __funline __m256d _mm256_set_m128d(__m128d __H, __m128d __L) { return _mm256_insertf128_pd(_mm256_castpd128_pd256(__L), __H, 1); } __funline __m256i _mm256_set_m128i(__m128i __H, __m128i __L) { return _mm256_insertf128_si256(_mm256_castsi128_si256(__L), __H, 1); } __funline __m256 _mm256_setr_m128(__m128 __L, __m128 __H) { return _mm256_set_m128(__H, __L); } __funline __m256d _mm256_setr_m128d(__m128d __L, __m128d __H) { return _mm256_set_m128d(__H, __L); } __funline __m256i _mm256_setr_m128i(__m128i __L, __m128i __H) { return _mm256_set_m128i(__H, __L); } #ifdef __DISABLE_AVX__ #undef __DISABLE_AVX__ #pragma GCC pop_options #endif /* __DISABLE_AVX__ */ #endif /* _AVXINTRIN_H_INCLUDED */
35,804
1,034
jart/cosmopolitan
false
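Editor's note: the AVX arithmetic and unaligned load/store intrinsics above
compose in the usual strip-mined loop pattern. A minimal sketch follows,
assuming an AVX-enabled build and n divisible by 8; saxpy8 is an
illustrative name, not a repo function.

/* Hedged sketch: dst[i] = a*x[i] + y[i], eight floats per iteration. */
#include "third_party/intel/immintrin.internal.h"
static void saxpy8(float *dst, const float *x, const float *y, float a,
                   int n) {
  __m256 va = _mm256_set1_ps(a); /* broadcast the scalar */
  for (int i = 0; i < n; i += 8) {
    __m256 vx = _mm256_loadu_ps(x + i);
    __m256 vy = _mm256_loadu_ps(y + i);
    _mm256_storeu_ps(dst + i, _mm256_add_ps(_mm256_mul_ps(va, vx), vy));
  }
}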
cosmopolitan/third_party/intel/avx512cdintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED
#error "Never use <avx512cdintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef _AVX512CDINTRIN_H_INCLUDED
#define _AVX512CDINTRIN_H_INCLUDED
#ifndef __AVX512CD__
#pragma GCC push_options
#pragma GCC target("avx512cd")
#define __DISABLE_AVX512CD__
#endif /* __AVX512CD__ */
typedef long long __v8di __attribute__((__vector_size__(64)));
typedef int __v16si __attribute__((__vector_size__(64)));
typedef long long __m512i __attribute__((__vector_size__(64), __may_alias__));
typedef double __m512d __attribute__((__vector_size__(64), __may_alias__));
typedef unsigned char __mmask8;
typedef unsigned short __mmask16;
__funline __m512i _mm512_conflict_epi32(__m512i __A) {
  return (__m512i)__builtin_ia32_vpconflictsi_512_mask(
      (__v16si)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)-1);
}
__funline __m512i _mm512_mask_conflict_epi32(__m512i __W, __mmask16 __U,
                                             __m512i __A) {
  return (__m512i)__builtin_ia32_vpconflictsi_512_mask(
      (__v16si)__A, (__v16si)__W, (__mmask16)__U);
}
__funline __m512i _mm512_maskz_conflict_epi32(__mmask16 __U, __m512i __A) {
  return (__m512i)__builtin_ia32_vpconflictsi_512_mask(
      (__v16si)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U);
}
__funline __m512i _mm512_conflict_epi64(__m512i __A) {
  return (__m512i)__builtin_ia32_vpconflictdi_512_mask(
      (__v8di)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1);
}
__funline __m512i _mm512_mask_conflict_epi64(__m512i __W, __mmask8 __U,
                                             __m512i __A) {
  return (__m512i)__builtin_ia32_vpconflictdi_512_mask(
      (__v8di)__A, (__v8di)__W, (__mmask8)__U);
}
__funline __m512i _mm512_maskz_conflict_epi64(__mmask8 __U, __m512i __A) {
  return (__m512i)__builtin_ia32_vpconflictdi_512_mask(
      (__v8di)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}
__funline __m512i _mm512_lzcnt_epi64(__m512i __A) {
  return (__m512i)__builtin_ia32_vplzcntq_512_mask(
      (__v8di)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)-1);
}
__funline __m512i _mm512_mask_lzcnt_epi64(__m512i __W, __mmask8 __U,
                                          __m512i __A) {
  return (__m512i)__builtin_ia32_vplzcntq_512_mask((__v8di)__A, (__v8di)__W,
                                                   (__mmask8)__U);
}
__funline __m512i _mm512_maskz_lzcnt_epi64(__mmask8 __U, __m512i __A) {
  return (__m512i)__builtin_ia32_vplzcntq_512_mask(
      (__v8di)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}
__funline __m512i _mm512_lzcnt_epi32(__m512i __A) {
  return (__m512i)__builtin_ia32_vplzcntd_512_mask(
      (__v16si)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)-1);
}
__funline __m512i _mm512_mask_lzcnt_epi32(__m512i __W, __mmask16 __U,
                                          __m512i __A) {
  return (__m512i)__builtin_ia32_vplzcntd_512_mask((__v16si)__A, (__v16si)__W,
                                                   (__mmask16)__U);
}
__funline __m512i _mm512_maskz_lzcnt_epi32(__mmask16 __U, __m512i __A) {
  return (__m512i)__builtin_ia32_vplzcntd_512_mask(
      (__v16si)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U);
}
__funline __m512i _mm512_broadcastmb_epi64(__mmask8 __A) {
  return (__m512i)__builtin_ia32_broadcastmb512(__A);
}
__funline __m512i _mm512_broadcastmw_epi32(__mmask16 __A) {
  return (__m512i)__builtin_ia32_broadcastmw512(__A);
}
#ifdef __DISABLE_AVX512CD__
#undef __DISABLE_AVX512CD__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512CD__ */
#endif /* _AVX512CDINTRIN_H_INCLUDED */
3,636
101
jart/cosmopolitan
false
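Editor's note: _mm512_lzcnt_epi32 above returns the leading-zero count of
each 32-bit lane (32 for an all-zero lane). A minimal sketch follows,
assuming a build with AVX512F and AVX512CD enabled and taking the 512-bit
unaligned load/store intrinsics from the companion AVX512F header; lzcnt16
is an illustrative name.

/* Hedged sketch: per-lane leading-zero counts for 16 ints at once. */
#include "third_party/intel/immintrin.internal.h"
static void lzcnt16(int out[16], const int in[16]) {
  __m512i v = _mm512_loadu_si512(in);  /* unaligned 512-bit load (AVX512F) */
  __m512i lz = _mm512_lzcnt_epi32(v);  /* zero lanes yield 32 */
  _mm512_storeu_si512(out, lz);
}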
cosmopolitan/third_party/intel/x86intrin.internal.h
#ifndef _X86INTRIN_H_INCLUDED
#define _X86INTRIN_H_INCLUDED
#ifdef __x86_64__
#include "third_party/intel/ia32intrin.internal.h"
#ifndef __iamcu__
/* clang-format off */
#include "third_party/intel/immintrin.internal.h"
#include "third_party/intel/mm3dnow.internal.h"
#include "third_party/intel/fma4intrin.internal.h"
#include "third_party/intel/xopintrin.internal.h"
#include "third_party/intel/lwpintrin.internal.h"
#include "third_party/intel/tbmintrin.internal.h"
#include "third_party/intel/popcntintrin.internal.h"
#include "third_party/intel/mwaitxintrin.internal.h"
#include "third_party/intel/clzerointrin.internal.h"
/* clang-format on */
#endif /* __iamcu__ */
#endif /* __x86_64__ */
#endif /* _X86INTRIN_H_INCLUDED */
734
22
jart/cosmopolitan
false
cosmopolitan/third_party/intel/avx512vlbwintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #error "Never use <avx512vlbwintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _AVX512VLBWINTRIN_H_INCLUDED #define _AVX512VLBWINTRIN_H_INCLUDED #if !defined(__AVX512VL__) || !defined(__AVX512BW__) #pragma GCC push_options #pragma GCC target("avx512vl,avx512bw") #define __DISABLE_AVX512VLBW__ #endif /* __AVX512VLBW__ */ __funline __m256i _mm256_mask_mov_epi8(__m256i __W, __mmask32 __U, __m256i __A) { return (__m256i)__builtin_ia32_movdquqi256_mask((__v32qi)__A, (__v32qi)__W, (__mmask32)__U); } __funline __m256i _mm256_maskz_mov_epi8(__mmask32 __U, __m256i __A) { return (__m256i)__builtin_ia32_movdquqi256_mask( (__v32qi)__A, (__v32qi)_mm256_setzero_si256(), (__mmask32)__U); } __funline __m128i _mm_mask_mov_epi8(__m128i __W, __mmask16 __U, __m128i __A) { return (__m128i)__builtin_ia32_movdquqi128_mask((__v16qi)__A, (__v16qi)__W, (__mmask16)__U); } __funline __m128i _mm_maskz_mov_epi8(__mmask16 __U, __m128i __A) { return (__m128i)__builtin_ia32_movdquqi128_mask( (__v16qi)__A, (__v16qi)_mm_setzero_si128(), (__mmask16)__U); } __funline void _mm256_mask_storeu_epi8(void *__P, __mmask32 __U, __m256i __A) { __builtin_ia32_storedquqi256_mask((char *)__P, (__v32qi)__A, (__mmask32)__U); } __funline void _mm_mask_storeu_epi8(void *__P, __mmask16 __U, __m128i __A) { __builtin_ia32_storedquqi128_mask((char *)__P, (__v16qi)__A, (__mmask16)__U); } __funline __m256i _mm256_mask_loadu_epi16(__m256i __W, __mmask16 __U, void const *__P) { return (__m256i)__builtin_ia32_loaddquhi256_mask( (const short *)__P, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_loadu_epi16(__mmask16 __U, void const *__P) { return (__m256i)__builtin_ia32_loaddquhi256_mask( (const short *)__P, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_loadu_epi16(__m128i __W, __mmask8 __U, void const *__P) { return (__m128i)__builtin_ia32_loaddquhi128_mask((const short *)__P, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_loadu_epi16(__mmask8 __U, void const *__P) { return (__m128i)__builtin_ia32_loaddquhi128_mask( (const short *)__P, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_mov_epi16(__m256i __W, __mmask16 __U, __m256i __A) { return (__m256i)__builtin_ia32_movdquhi256_mask((__v16hi)__A, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_mov_epi16(__mmask16 __U, __m256i __A) { return (__m256i)__builtin_ia32_movdquhi256_mask( (__v16hi)__A, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_mov_epi16(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_movdquhi128_mask((__v8hi)__A, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_mov_epi16(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_movdquhi128_mask( (__v8hi)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_loadu_epi8(__m256i __W, __mmask32 __U, void const *__P) { return (__m256i)__builtin_ia32_loaddquqi256_mask( (const char *)__P, (__v32qi)__W, (__mmask32)__U); } __funline __m256i _mm256_maskz_loadu_epi8(__mmask32 __U, void const *__P) { return (__m256i)__builtin_ia32_loaddquqi256_mask( (const char *)__P, (__v32qi)_mm256_setzero_si256(), (__mmask32)__U); } __funline __m128i _mm_mask_loadu_epi8(__m128i __W, __mmask16 __U, void const *__P) { return (__m128i)__builtin_ia32_loaddquqi128_mask( (const char *)__P, (__v16qi)__W, (__mmask16)__U); } __funline __m128i _mm_maskz_loadu_epi8(__mmask16 __U, void const *__P) { return 
(__m128i)__builtin_ia32_loaddquqi128_mask( (const char *)__P, (__v16qi)_mm_setzero_si128(), (__mmask16)__U); } __funline __m128i _mm256_cvtepi16_epi8(__m256i __A) { return (__m128i)__builtin_ia32_pmovwb256_mask( (__v16hi)__A, (__v16qi)_mm_undefined_si128(), (__mmask16)-1); } __funline void _mm256_mask_cvtepi16_storeu_epi8(void *__P, __mmask16 __M, __m256i __A) { __builtin_ia32_pmovwb256mem_mask((__v16qi *)__P, (__v16hi)__A, __M); } __funline __m128i _mm256_mask_cvtepi16_epi8(__m128i __O, __mmask16 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovwb256_mask((__v16hi)__A, (__v16qi)__O, __M); } __funline __m128i _mm256_maskz_cvtepi16_epi8(__mmask16 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovwb256_mask( (__v16hi)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm_cvtsepi16_epi8(__m128i __A) { return (__m128i)__builtin_ia32_pmovswb128_mask( (__v8hi)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtsepi16_storeu_epi8(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovswb128mem_mask((__v8qi *)__P, (__v8hi)__A, __M); } __funline __m128i _mm_mask_cvtsepi16_epi8(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovswb128_mask((__v8hi)__A, (__v16qi)__O, __M); } __funline __m128i _mm_maskz_cvtsepi16_epi8(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovswb128_mask( (__v8hi)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtsepi16_epi8(__m256i __A) { return (__m128i)__builtin_ia32_pmovswb256_mask( (__v16hi)__A, (__v16qi)_mm_undefined_si128(), (__mmask16)-1); } __funline void _mm256_mask_cvtsepi16_storeu_epi8(void *__P, __mmask16 __M, __m256i __A) { __builtin_ia32_pmovswb256mem_mask((__v16qi *)__P, (__v16hi)__A, __M); } __funline __m128i _mm256_mask_cvtsepi16_epi8(__m128i __O, __mmask16 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovswb256_mask((__v16hi)__A, (__v16qi)__O, __M); } __funline __m128i _mm256_maskz_cvtsepi16_epi8(__mmask16 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovswb256_mask( (__v16hi)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm_cvtusepi16_epi8(__m128i __A) { return (__m128i)__builtin_ia32_pmovuswb128_mask( (__v8hi)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtusepi16_storeu_epi8(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovuswb128mem_mask((__v8qi *)__P, (__v8hi)__A, __M); } __funline __m128i _mm_mask_cvtusepi16_epi8(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovuswb128_mask((__v8hi)__A, (__v16qi)__O, __M); } __funline __m128i _mm_maskz_cvtusepi16_epi8(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovuswb128_mask( (__v8hi)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtusepi16_epi8(__m256i __A) { return (__m128i)__builtin_ia32_pmovuswb256_mask( (__v16hi)__A, (__v16qi)_mm_undefined_si128(), (__mmask16)-1); } __funline void _mm256_mask_cvtusepi16_storeu_epi8(void *__P, __mmask16 __M, __m256i __A) { __builtin_ia32_pmovuswb256mem_mask((__v16qi *)__P, (__v16hi)__A, __M); } __funline __m128i _mm256_mask_cvtusepi16_epi8(__m128i __O, __mmask16 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovuswb256_mask((__v16hi)__A, (__v16qi)__O, __M); } __funline __m128i _mm256_maskz_cvtusepi16_epi8(__mmask16 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovuswb256_mask( (__v16hi)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m256i _mm256_mask_broadcastb_epi8(__m256i __O, __mmask32 __M, __m128i __A) { return 
(__m256i)__builtin_ia32_pbroadcastb256_mask((__v16qi)__A, (__v32qi)__O, __M); } __funline __m256i _mm256_maskz_broadcastb_epi8(__mmask32 __M, __m128i __A) { return (__m256i)__builtin_ia32_pbroadcastb256_mask( (__v16qi)__A, (__v32qi)_mm256_setzero_si256(), __M); } __funline __m256i _mm256_mask_set1_epi8(__m256i __O, __mmask32 __M, char __A) { return (__m256i)__builtin_ia32_pbroadcastb256_gpr_mask(__A, (__v32qi)__O, __M); } __funline __m256i _mm256_maskz_set1_epi8(__mmask32 __M, char __A) { return (__m256i)__builtin_ia32_pbroadcastb256_gpr_mask( __A, (__v32qi)_mm256_setzero_si256(), __M); } __funline __m128i _mm_mask_broadcastb_epi8(__m128i __O, __mmask16 __M, __m128i __A) { return (__m128i)__builtin_ia32_pbroadcastb128_mask((__v16qi)__A, (__v16qi)__O, __M); } __funline __m128i _mm_maskz_broadcastb_epi8(__mmask16 __M, __m128i __A) { return (__m128i)__builtin_ia32_pbroadcastb128_mask( (__v16qi)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm_mask_set1_epi8(__m128i __O, __mmask16 __M, char __A) { return (__m128i)__builtin_ia32_pbroadcastb128_gpr_mask(__A, (__v16qi)__O, __M); } __funline __m128i _mm_maskz_set1_epi8(__mmask16 __M, char __A) { return (__m128i)__builtin_ia32_pbroadcastb128_gpr_mask( __A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m256i _mm256_mask_broadcastw_epi16(__m256i __O, __mmask16 __M, __m128i __A) { return (__m256i)__builtin_ia32_pbroadcastw256_mask((__v8hi)__A, (__v16hi)__O, __M); } __funline __m256i _mm256_maskz_broadcastw_epi16(__mmask16 __M, __m128i __A) { return (__m256i)__builtin_ia32_pbroadcastw256_mask( (__v8hi)__A, (__v16hi)_mm256_setzero_si256(), __M); } __funline __m256i _mm256_mask_set1_epi16(__m256i __O, __mmask16 __M, short __A) { return (__m256i)__builtin_ia32_pbroadcastw256_gpr_mask(__A, (__v16hi)__O, __M); } __funline __m256i _mm256_maskz_set1_epi16(__mmask16 __M, short __A) { return (__m256i)__builtin_ia32_pbroadcastw256_gpr_mask( __A, (__v16hi)_mm256_setzero_si256(), __M); } __funline __m128i _mm_mask_broadcastw_epi16(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pbroadcastw128_mask((__v8hi)__A, (__v8hi)__O, __M); } __funline __m128i _mm_maskz_broadcastw_epi16(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pbroadcastw128_mask( (__v8hi)__A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm_mask_set1_epi16(__m128i __O, __mmask8 __M, short __A) { return (__m128i)__builtin_ia32_pbroadcastw128_gpr_mask(__A, (__v8hi)__O, __M); } __funline __m128i _mm_maskz_set1_epi16(__mmask8 __M, short __A) { return (__m128i)__builtin_ia32_pbroadcastw128_gpr_mask( __A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m256i _mm256_permutexvar_epi16(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_permvarhi256_mask( (__v16hi)__B, (__v16hi)__A, (__v16hi)_mm256_setzero_si256(), (__mmask16)-1); } __funline __m256i _mm256_maskz_permutexvar_epi16(__mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_permvarhi256_mask( (__v16hi)__B, (__v16hi)__A, (__v16hi)_mm256_setzero_si256(), (__mmask16)__M); } __funline __m256i _mm256_mask_permutexvar_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_permvarhi256_mask( (__v16hi)__B, (__v16hi)__A, (__v16hi)__W, (__mmask16)__M); } __funline __m128i _mm_permutexvar_epi16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_permvarhi128_mask( (__v8hi)__B, (__v8hi)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm_maskz_permutexvar_epi16(__mmask8 __M, __m128i __A, __m128i __B) { 
return (__m128i)__builtin_ia32_permvarhi128_mask( (__v8hi)__B, (__v8hi)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)__M); } __funline __m128i _mm_mask_permutexvar_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_permvarhi128_mask((__v8hi)__B, (__v8hi)__A, (__v8hi)__W, (__mmask8)__M); } __funline __m256i _mm256_permutex2var_epi16(__m256i __A, __m256i __I, __m256i __B) { return (__m256i)__builtin_ia32_vpermt2varhi256_mask( (__v16hi)__I /* idx */, (__v16hi)__A, (__v16hi)__B, (__mmask16)-1); } __funline __m256i _mm256_mask_permutex2var_epi16(__m256i __A, __mmask16 __U, __m256i __I, __m256i __B) { return (__m256i)__builtin_ia32_vpermt2varhi256_mask( (__v16hi)__I /* idx */, (__v16hi)__A, (__v16hi)__B, (__mmask16)__U); } __funline __m256i _mm256_mask2_permutex2var_epi16(__m256i __A, __m256i __I, __mmask16 __U, __m256i __B) { return (__m256i)__builtin_ia32_vpermi2varhi256_mask((__v16hi)__A, (__v16hi)__I /* idx */, (__v16hi)__B, (__mmask16)__U); } __funline __m256i _mm256_maskz_permutex2var_epi16(__mmask16 __U, __m256i __A, __m256i __I, __m256i __B) { return (__m256i)__builtin_ia32_vpermt2varhi256_maskz( (__v16hi)__I /* idx */, (__v16hi)__A, (__v16hi)__B, (__mmask16)__U); } __funline __m128i _mm_permutex2var_epi16(__m128i __A, __m128i __I, __m128i __B) { return (__m128i)__builtin_ia32_vpermt2varhi128_mask((__v8hi)__I /* idx */, (__v8hi)__A, (__v8hi)__B, (__mmask8)-1); } __funline __m128i _mm_mask_permutex2var_epi16(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B) { return (__m128i)__builtin_ia32_vpermt2varhi128_mask((__v8hi)__I /* idx */, (__v8hi)__A, (__v8hi)__B, (__mmask8)__U); } __funline __m128i _mm_mask2_permutex2var_epi16(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B) { return (__m128i)__builtin_ia32_vpermi2varhi128_mask((__v8hi)__A, (__v8hi)__I /* idx */, (__v8hi)__B, (__mmask8)__U); } __funline __m128i _mm_maskz_permutex2var_epi16(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B) { return (__m128i)__builtin_ia32_vpermt2varhi128_maskz((__v8hi)__I /* idx */, (__v8hi)__A, (__v8hi)__B, (__mmask8)__U); } __funline __m256i _mm256_mask_maddubs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_pmaddubsw256_mask( (__v32qi)__X, (__v32qi)__Y, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_maddubs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_pmaddubsw256_mask( (__v32qi)__X, (__v32qi)__Y, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_maddubs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_pmaddubsw128_mask((__v16qi)__X, (__v16qi)__Y, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_maddubs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_pmaddubsw128_mask( (__v16qi)__X, (__v16qi)__Y, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_madd_epi16(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaddwd256_mask((__v16hi)__A, (__v16hi)__B, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_madd_epi16(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaddwd256_mask((__v16hi)__A, (__v16hi)__B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_madd_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmaddwd128_mask((__v8hi)__A, (__v8hi)__B, (__v4si)__W, (__mmask8)__U); } __funline 
__m128i _mm_maskz_madd_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmaddwd128_mask( (__v8hi)__A, (__v8hi)__B, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __mmask16 _mm_movepi8_mask(__m128i __A) { return (__mmask16)__builtin_ia32_cvtb2mask128((__v16qi)__A); } __funline __mmask32 _mm256_movepi8_mask(__m256i __A) { return (__mmask32)__builtin_ia32_cvtb2mask256((__v32qi)__A); } __funline __mmask8 _mm_movepi16_mask(__m128i __A) { return (__mmask8)__builtin_ia32_cvtw2mask128((__v8hi)__A); } __funline __mmask16 _mm256_movepi16_mask(__m256i __A) { return (__mmask16)__builtin_ia32_cvtw2mask256((__v16hi)__A); } __funline __m128i _mm_movm_epi8(__mmask16 __A) { return (__m128i)__builtin_ia32_cvtmask2b128(__A); } __funline __m256i _mm256_movm_epi8(__mmask32 __A) { return (__m256i)__builtin_ia32_cvtmask2b256(__A); } __funline __m128i _mm_movm_epi16(__mmask8 __A) { return (__m128i)__builtin_ia32_cvtmask2w128(__A); } __funline __m256i _mm256_movm_epi16(__mmask16 __A) { return (__m256i)__builtin_ia32_cvtmask2w256(__A); } __funline __mmask16 _mm_test_epi8_mask(__m128i __A, __m128i __B) { return (__mmask16)__builtin_ia32_ptestmb128((__v16qi)__A, (__v16qi)__B, (__mmask16)-1); } __funline __mmask16 _mm_mask_test_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) { return (__mmask16)__builtin_ia32_ptestmb128((__v16qi)__A, (__v16qi)__B, __U); } __funline __mmask32 _mm256_test_epi8_mask(__m256i __A, __m256i __B) { return (__mmask32)__builtin_ia32_ptestmb256((__v32qi)__A, (__v32qi)__B, (__mmask32)-1); } __funline __mmask32 _mm256_mask_test_epi8_mask(__mmask32 __U, __m256i __A, __m256i __B) { return (__mmask32)__builtin_ia32_ptestmb256((__v32qi)__A, (__v32qi)__B, __U); } __funline __mmask8 _mm_test_epi16_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ptestmw128((__v8hi)__A, (__v8hi)__B, (__mmask8)-1); } __funline __mmask8 _mm_mask_test_epi16_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ptestmw128((__v8hi)__A, (__v8hi)__B, __U); } __funline __mmask16 _mm256_test_epi16_mask(__m256i __A, __m256i __B) { return (__mmask16)__builtin_ia32_ptestmw256((__v16hi)__A, (__v16hi)__B, (__mmask16)-1); } __funline __mmask16 _mm256_mask_test_epi16_mask(__mmask16 __U, __m256i __A, __m256i __B) { return (__mmask16)__builtin_ia32_ptestmw256((__v16hi)__A, (__v16hi)__B, __U); } __funline __m256i _mm256_maskz_min_epu16(__mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminuw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__M); } __funline __m256i _mm256_mask_min_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminuw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__M); } __funline __m128i _mm_maskz_min_epu16(__mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pminuw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__M); } __funline __m128i _mm_mask_min_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pminuw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__M); } __funline __m256i _mm256_maskz_min_epi16(__mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminsw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__M); } __funline __m256i _mm256_mask_min_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminsw256_mask((__v16hi)__A, 
(__v16hi)__B, (__v16hi)__W, (__mmask16)__M); } __funline __m256i _mm256_maskz_max_epu8(__mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxub256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__M); } __funline __m256i _mm256_mask_max_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxub256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)__W, (__mmask32)__M); } __funline __m128i _mm_maskz_max_epu8(__mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmaxub128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__M); } __funline __m128i _mm_mask_max_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmaxub128_mask((__v16qi)__A, (__v16qi)__B, (__v16qi)__W, (__mmask16)__M); } __funline __m256i _mm256_maskz_max_epi8(__mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxsb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__M); } __funline __m256i _mm256_mask_max_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxsb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)__W, (__mmask32)__M); } __funline __m128i _mm_maskz_max_epi8(__mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmaxsb128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__M); } __funline __m128i _mm_mask_max_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmaxsb128_mask((__v16qi)__A, (__v16qi)__B, (__v16qi)__W, (__mmask16)__M); } __funline __m256i _mm256_maskz_min_epu8(__mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminub256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__M); } __funline __m256i _mm256_mask_min_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminub256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)__W, (__mmask32)__M); } __funline __m128i _mm_maskz_min_epu8(__mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pminub128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__M); } __funline __m128i _mm_mask_min_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pminub128_mask((__v16qi)__A, (__v16qi)__B, (__v16qi)__W, (__mmask16)__M); } __funline __m256i _mm256_maskz_min_epi8(__mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminsb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__M); } __funline __m256i _mm256_mask_min_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminsb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)__W, (__mmask32)__M); } __funline __m128i _mm_maskz_min_epi8(__mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pminsb128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__M); } __funline __m128i _mm_mask_min_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pminsb128_mask((__v16qi)__A, (__v16qi)__B, (__v16qi)__W, (__mmask16)__M); } __funline __m256i _mm256_maskz_max_epi16(__mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxsw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__M); } __funline __m256i 
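/*
 * Convention used by every mask_/maskz_ wrapper in this file: the
 * mask_ form copies lanes from __W wherever the corresponding mask bit
 * is clear (merge masking), while the maskz_ form zeroes those lanes
 * (zero masking). Hypothetical example with the byte minimum above:
 *
 *   __m128i merged = _mm_mask_min_epu8(w, m, a, b);  // w where m==0
 *   __m128i zeroed = _mm_maskz_min_epu8(m, a, b);    // 0 where m==0
 */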
_mm256_mask_max_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxsw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__M); } __funline __m128i _mm_maskz_max_epi16(__mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmaxsw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__M); } __funline __m128i _mm_mask_max_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmaxsw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__M); } __funline __m256i _mm256_maskz_max_epu16(__mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxuw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__M); } __funline __m256i _mm256_mask_max_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxuw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__M); } __funline __m128i _mm_maskz_max_epu16(__mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmaxuw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__M); } __funline __m128i _mm_mask_max_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmaxuw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__M); } __funline __m128i _mm_maskz_min_epi16(__mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pminsw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__M); } __funline __m128i _mm_mask_min_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pminsw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__M); } #ifdef __OPTIMIZE__ __funline __m256i _mm256_mask_alignr_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B, const int __N) { return (__m256i)__builtin_ia32_palignr256_mask( (__v4di)__A, (__v4di)__B, __N * 8, (__v4di)__W, (__mmask32)__U); } __funline __m256i _mm256_maskz_alignr_epi8(__mmask32 __U, __m256i __A, __m256i __B, const int __N) { return (__m256i)__builtin_ia32_palignr256_mask( (__v4di)__A, (__v4di)__B, __N * 8, (__v4di)_mm256_setzero_si256(), (__mmask32)__U); } __funline __m128i _mm_mask_alignr_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B, const int __N) { return (__m128i)__builtin_ia32_palignr128_mask( (__v2di)__A, (__v2di)__B, __N * 8, (__v2di)__W, (__mmask16)__U); } __funline __m128i _mm_maskz_alignr_epi8(__mmask16 __U, __m128i __A, __m128i __B, const int __N) { return (__m128i)__builtin_ia32_palignr128_mask( (__v2di)__A, (__v2di)__B, __N * 8, (__v2di)_mm_setzero_si128(), (__mmask16)__U); } __funline __m256i _mm256_dbsad_epu8(__m256i __A, __m256i __B, const int __imm) { return (__m256i)__builtin_ia32_dbpsadbw256_mask( (__v32qi)__A, (__v32qi)__B, __imm, (__v16hi)_mm256_setzero_si256(), (__mmask16)-1); } __funline __m256i _mm256_mask_dbsad_epu8(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B, const int __imm) { return (__m256i)__builtin_ia32_dbpsadbw256_mask( (__v32qi)__A, (__v32qi)__B, __imm, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_dbsad_epu8(__mmask16 __U, __m256i __A, __m256i __B, const int __imm) { return (__m256i)__builtin_ia32_dbpsadbw256_mask( (__v32qi)__A, (__v32qi)__B, __imm, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_dbsad_epu8(__m128i __A, __m128i __B, const int __imm) { return (__m128i)__builtin_ia32_dbpsadbw128_mask( (__v16qi)__A, 
(__v16qi)__B, __imm, (__v8hi)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm_mask_dbsad_epu8(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B, const int __imm) { return (__m128i)__builtin_ia32_dbpsadbw128_mask( (__v16qi)__A, (__v16qi)__B, __imm, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_dbsad_epu8(__mmask8 __U, __m128i __A, __m128i __B, const int __imm) { return (__m128i)__builtin_ia32_dbpsadbw128_mask( (__v16qi)__A, (__v16qi)__B, __imm, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_mask_blend_epi16(__mmask8 __U, __m128i __A, __m128i __W) { return (__m128i)__builtin_ia32_blendmw_128_mask((__v8hi)__A, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_mask_blend_epi8(__mmask16 __U, __m128i __A, __m128i __W) { return (__m128i)__builtin_ia32_blendmb_128_mask((__v16qi)__A, (__v16qi)__W, (__mmask16)__U); } __funline __m256i _mm256_mask_blend_epi16(__mmask16 __U, __m256i __A, __m256i __W) { return (__m256i)__builtin_ia32_blendmw_256_mask((__v16hi)__A, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_mask_blend_epi8(__mmask32 __U, __m256i __A, __m256i __W) { return (__m256i)__builtin_ia32_blendmb_256_mask((__v32qi)__A, (__v32qi)__W, (__mmask32)__U); } __funline __mmask8 _mm_mask_cmp_epi16_mask(__mmask8 __U, __m128i __X, __m128i __Y, const int __P) { return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__X, (__v8hi)__Y, __P, (__mmask8)__U); } __funline __mmask8 _mm_cmp_epi16_mask(__m128i __X, __m128i __Y, const int __P) { return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__X, (__v8hi)__Y, __P, (__mmask8)-1); } __funline __mmask16 _mm256_mask_cmp_epi16_mask(__mmask16 __U, __m256i __X, __m256i __Y, const int __P) { return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__X, (__v16hi)__Y, __P, (__mmask16)__U); } __funline __mmask16 _mm256_cmp_epi16_mask(__m256i __X, __m256i __Y, const int __P) { return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__X, (__v16hi)__Y, __P, (__mmask16)-1); } __funline __mmask16 _mm_mask_cmp_epi8_mask(__mmask16 __U, __m128i __X, __m128i __Y, const int __P) { return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__X, (__v16qi)__Y, __P, (__mmask16)__U); } __funline __mmask16 _mm_cmp_epi8_mask(__m128i __X, __m128i __Y, const int __P) { return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__X, (__v16qi)__Y, __P, (__mmask16)-1); } __funline __mmask32 _mm256_mask_cmp_epi8_mask(__mmask32 __U, __m256i __X, __m256i __Y, const int __P) { return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__X, (__v32qi)__Y, __P, (__mmask32)__U); } __funline __mmask32 _mm256_cmp_epi8_mask(__m256i __X, __m256i __Y, const int __P) { return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__X, (__v32qi)__Y, __P, (__mmask32)-1); } __funline __mmask8 _mm_mask_cmp_epu16_mask(__mmask8 __U, __m128i __X, __m128i __Y, const int __P) { return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__X, (__v8hi)__Y, __P, (__mmask8)__U); } __funline __mmask8 _mm_cmp_epu16_mask(__m128i __X, __m128i __Y, const int __P) { return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__X, (__v8hi)__Y, __P, (__mmask8)-1); } __funline __mmask16 _mm256_mask_cmp_epu16_mask(__mmask16 __U, __m256i __X, __m256i __Y, const int __P) { return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__X, (__v16hi)__Y, __P, (__mmask16)__U); } __funline __mmask16 _mm256_cmp_epu16_mask(__m256i __X, __m256i __Y, const int __P) { return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__X, (__v16hi)__Y, __P, (__mmask16)-1); } __funline __mmask16 _mm_mask_cmp_epu8_mask(__mmask16 
__U, __m128i __X, __m128i __Y, const int __P) { return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__X, (__v16qi)__Y, __P, (__mmask16)__U); } __funline __mmask16 _mm_cmp_epu8_mask(__m128i __X, __m128i __Y, const int __P) { return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__X, (__v16qi)__Y, __P, (__mmask16)-1); } __funline __mmask32 _mm256_mask_cmp_epu8_mask(__mmask32 __U, __m256i __X, __m256i __Y, const int __P) { return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__X, (__v32qi)__Y, __P, (__mmask32)__U); } __funline __mmask32 _mm256_cmp_epu8_mask(__m256i __X, __m256i __Y, const int __P) { return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__X, (__v32qi)__Y, __P, (__mmask32)-1); } __funline __m256i _mm256_mask_srli_epi16(__m256i __W, __mmask16 __U, __m256i __A, const int __imm) { return (__m256i)__builtin_ia32_psrlwi256_mask((__v16hi)__A, __imm, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_srli_epi16(__mmask16 __U, __m256i __A, const int __imm) { return (__m256i)__builtin_ia32_psrlwi256_mask( (__v16hi)__A, __imm, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_srli_epi16(__m128i __W, __mmask8 __U, __m128i __A, const int __imm) { return (__m128i)__builtin_ia32_psrlwi128_mask((__v8hi)__A, __imm, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_srli_epi16(__mmask8 __U, __m128i __A, const int __imm) { return (__m128i)__builtin_ia32_psrlwi128_mask( (__v8hi)__A, __imm, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_shufflehi_epi16(__m256i __W, __mmask16 __U, __m256i __A, const int __imm) { return (__m256i)__builtin_ia32_pshufhw256_mask((__v16hi)__A, __imm, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_shufflehi_epi16(__mmask16 __U, __m256i __A, const int __imm) { return (__m256i)__builtin_ia32_pshufhw256_mask( (__v16hi)__A, __imm, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_shufflehi_epi16(__m128i __W, __mmask8 __U, __m128i __A, const int __imm) { return (__m128i)__builtin_ia32_pshufhw128_mask((__v8hi)__A, __imm, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_shufflehi_epi16(__mmask8 __U, __m128i __A, const int __imm) { return (__m128i)__builtin_ia32_pshufhw128_mask( (__v8hi)__A, __imm, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_shufflelo_epi16(__m256i __W, __mmask16 __U, __m256i __A, const int __imm) { return (__m256i)__builtin_ia32_pshuflw256_mask((__v16hi)__A, __imm, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_shufflelo_epi16(__mmask16 __U, __m256i __A, const int __imm) { return (__m256i)__builtin_ia32_pshuflw256_mask( (__v16hi)__A, __imm, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_shufflelo_epi16(__m128i __W, __mmask8 __U, __m128i __A, const int __imm) { return (__m128i)__builtin_ia32_pshuflw128_mask((__v8hi)__A, __imm, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_shufflelo_epi16(__mmask8 __U, __m128i __A, const int __imm) { return (__m128i)__builtin_ia32_pshuflw128_mask( (__v8hi)__A, __imm, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A, const int __imm) { return (__m256i)__builtin_ia32_psrawi256_mask((__v16hi)__A, __imm, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, const int __imm) { return (__m256i)__builtin_ia32_psrawi256_mask( (__v16hi)__A, __imm, 
(__v16hi)_mm256_setzero_si256(), (__mmask16)__U);
}

__funline __m128i _mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A,
                                      const int __imm) {
  return (__m128i)__builtin_ia32_psrawi128_mask((__v8hi)__A, __imm,
                                                (__v8hi)__W, (__mmask8)__U);
}

__funline __m128i _mm_maskz_srai_epi16(__mmask8 __U, __m128i __A,
                                       const int __imm) {
  return (__m128i)__builtin_ia32_psrawi128_mask(
      (__v8hi)__A, __imm, (__v8hi)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m256i _mm256_mask_slli_epi16(__m256i __W, __mmask16 __U,
                                         __m256i __A, int __B) {
  return (__m256i)__builtin_ia32_psllwi256_mask((__v16hi)__A, __B,
                                                (__v16hi)__W, (__mmask16)__U);
}

__funline __m256i _mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, int __B) {
  return (__m256i)__builtin_ia32_psllwi256_mask(
      (__v16hi)__A, __B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U);
}

__funline __m128i _mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A,
                                      int __B) {
  return (__m128i)__builtin_ia32_psllwi128_mask((__v8hi)__A, __B, (__v8hi)__W,
                                                (__mmask8)__U);
}

__funline __m128i _mm_maskz_slli_epi16(__mmask8 __U, __m128i __A, int __B) {
  return (__m128i)__builtin_ia32_psllwi128_mask(
      (__v8hi)__A, __B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U);
}

#else
#define _mm256_mask_alignr_epi8(W, U, X, Y, N)                      \
  ((__m256i)__builtin_ia32_palignr256_mask(                         \
      (__v4di)(__m256i)(X), (__v4di)(__m256i)(Y), (int)((N) * 8),   \
      (__v4di)(__m256i)(W), (__mmask32)(U)))
#define _mm256_mask_srli_epi16(W, U, A, B)                          \
  ((__m256i)__builtin_ia32_psrlwi256_mask(                          \
      (__v16hi)(__m256i)(A), (int)(B), (__v16hi)(__m256i)(W), (__mmask16)(U)))
#define _mm256_maskz_srli_epi16(U, A, B)                                   \
  ((__m256i)__builtin_ia32_psrlwi256_mask((__v16hi)(__m256i)(A), (int)(B), \
                                          (__v16hi)_mm256_setzero_si256(), \
                                          (__mmask16)(U)))
#define _mm_mask_srli_epi16(W, U, A, B)                             \
  ((__m128i)__builtin_ia32_psrlwi128_mask(                          \
      (__v8hi)(__m128i)(A), (int)(B), (__v8hi)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_srli_epi16(U, A, B)                                     \
  ((__m128i)__builtin_ia32_psrlwi128_mask((__v8hi)(__m128i)(A), (int)(B), \
                                          (__v8hi)_mm_setzero_si128(),    \
                                          (__mmask8)(U)))
#define _mm256_mask_srai_epi16(W, U, A, B)                          \
  ((__m256i)__builtin_ia32_psrawi256_mask(                          \
      (__v16hi)(__m256i)(A), (int)(B), (__v16hi)(__m256i)(W), (__mmask16)(U)))
#define _mm256_maskz_srai_epi16(U, A, B)                                   \
  ((__m256i)__builtin_ia32_psrawi256_mask((__v16hi)(__m256i)(A), (int)(B), \
                                          (__v16hi)_mm256_setzero_si256(), \
                                          (__mmask16)(U)))
#define _mm_mask_srai_epi16(W, U, A, B)                             \
  ((__m128i)__builtin_ia32_psrawi128_mask(                          \
      (__v8hi)(__m128i)(A), (int)(B), (__v8hi)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_srai_epi16(U, A, B)                                     \
  ((__m128i)__builtin_ia32_psrawi128_mask((__v8hi)(__m128i)(A), (int)(B), \
                                          (__v8hi)_mm_setzero_si128(),    \
                                          (__mmask8)(U)))
#define _mm256_mask_shufflehi_epi16(W, U, A, B)                     \
  ((__m256i)__builtin_ia32_pshufhw256_mask(                         \
      (__v16hi)(__m256i)(A), (int)(B), (__v16hi)(__m256i)(W), (__mmask16)(U)))
#define _mm256_maskz_shufflehi_epi16(U, A, B)                       \
  ((__m256i)__builtin_ia32_pshufhw256_mask(                         \
      (__v16hi)(__m256i)(A), (int)(B),                              \
      (__v16hi)(__m256i)_mm256_setzero_si256(), (__mmask16)(U)))
#define _mm_mask_shufflehi_epi16(W, U, A, B)                        \
  ((__m128i)__builtin_ia32_pshufhw128_mask(                         \
      (__v8hi)(__m128i)(A), (int)(B), (__v8hi)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_shufflehi_epi16(U, A, B)                                  \
  ((__m128i)__builtin_ia32_pshufhw128_mask(                                 \
      (__v8hi)(__m128i)(A), (int)(B), (__v8hi)(__m128i)_mm_setzero_si128(), \
      (__mmask8)(U)))
#define _mm256_mask_shufflelo_epi16(W, U, A, B)                     \
  ((__m256i)__builtin_ia32_pshuflw256_mask(                         \
      (__v16hi)(__m256i)(A), (int)(B), (__v16hi)(__m256i)(W), (__mmask16)(U)))
#define _mm256_maskz_shufflelo_epi16(U, A, B)                       \
  ((__m256i)__builtin_ia32_pshuflw256_mask(                         \
      (__v16hi)(__m256i)(A), (int)(B),                              \
      (__v16hi)(__m256i)_mm256_setzero_si256(), (__mmask16)(U)))
#define _mm_mask_shufflelo_epi16(W, U, A, B)                        \
  ((__m128i)__builtin_ia32_pshuflw128_mask(                         \
      (__v8hi)(__m128i)(A), (int)(B), (__v8hi)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_shufflelo_epi16(U, A, B)                                  \
  ((__m128i)__builtin_ia32_pshuflw128_mask(                                 \
      (__v8hi)(__m128i)(A), (int)(B), (__v8hi)(__m128i)_mm_setzero_si128(), \
      (__mmask8)(U)))
#define _mm256_maskz_alignr_epi8(U, X, Y, N)                        \
  ((__m256i)__builtin_ia32_palignr256_mask(                         \
      (__v4di)(__m256i)(X), (__v4di)(__m256i)(Y), (int)((N) * 8),   \
      (__v4di)(__m256i)_mm256_setzero_si256(), (__mmask32)(U)))
#define _mm_mask_alignr_epi8(W, U, X, Y, N)                         \
  ((__m128i)__builtin_ia32_palignr128_mask(                         \
      (__v2di)(__m128i)(X), (__v2di)(__m128i)(Y), (int)((N) * 8),   \
      (__v2di)(__m128i)(W), (__mmask16)(U)))
#define _mm_maskz_alignr_epi8(U, X, Y, N)                           \
  ((__m128i)__builtin_ia32_palignr128_mask(                         \
      (__v2di)(__m128i)(X), (__v2di)(__m128i)(Y), (int)((N) * 8),   \
      (__v2di)(__m128i)_mm_setzero_si128(), (__mmask16)(U)))
#define _mm_mask_slli_epi16(W, U, X, C)                             \
  ((__m128i)__builtin_ia32_psllwi128_mask(                          \
      (__v8hi)(__m128i)(X), (int)(C), (__v8hi)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_slli_epi16(U, X, C)                                       \
  ((__m128i)__builtin_ia32_psllwi128_mask(                                  \
      (__v8hi)(__m128i)(X), (int)(C), (__v8hi)(__m128i)_mm_setzero_si128(), \
      (__mmask8)(U)))
#define _mm256_dbsad_epu8(X, Y, C)                                  \
  ((__m256i)__builtin_ia32_dbpsadbw256_mask(                        \
      (__v32qi)(__m256i)(X), (__v32qi)(__m256i)(Y), (int)(C),       \
      (__v16hi)(__m256i)_mm256_setzero_si256(), (__mmask16)-1))
#define _mm256_mask_slli_epi16(W, U, X, C)                          \
  ((__m256i)__builtin_ia32_psllwi256_mask(                          \
      (__v16hi)(__m256i)(X), (int)(C), (__v16hi)(__m256i)(W), (__mmask16)(U)))
#define _mm256_maskz_slli_epi16(U, X, C)                            \
  ((__m256i)__builtin_ia32_psllwi256_mask(                          \
      (__v16hi)(__m256i)(X), (int)(C),                              \
      (__v16hi)(__m256i)_mm256_setzero_si256(), (__mmask16)(U)))
#define _mm256_mask_dbsad_epu8(W, U, X, Y, C)                       \
  ((__m256i)__builtin_ia32_dbpsadbw256_mask(                        \
      (__v32qi)(__m256i)(X), (__v32qi)(__m256i)(Y), (int)(C),       \
      (__v16hi)(__m256i)(W), (__mmask16)(U)))
#define _mm256_maskz_dbsad_epu8(U, X, Y, C)                         \
  ((__m256i)__builtin_ia32_dbpsadbw256_mask(                        \
      (__v32qi)(__m256i)(X), (__v32qi)(__m256i)(Y), (int)(C),       \
      (__v16hi)(__m256i)_mm256_setzero_si256(), (__mmask16)(U)))
#define _mm_dbsad_epu8(X, Y, C)                                     \
  ((__m128i)__builtin_ia32_dbpsadbw128_mask(                        \
      (__v16qi)(__m128i)(X), (__v16qi)(__m128i)(Y), (int)(C),       \
      (__v8hi)(__m128i)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_dbsad_epu8(W, U, X, Y, C)                          \
  ((__m128i)__builtin_ia32_dbpsadbw128_mask(                        \
      (__v16qi)(__m128i)(X), (__v16qi)(__m128i)(Y), (int)(C),       \
      (__v8hi)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_dbsad_epu8(U, X, Y, C)                            \
  ((__m128i)__builtin_ia32_dbpsadbw128_mask(                        \
      (__v16qi)(__m128i)(X), (__v16qi)(__m128i)(Y), (int)(C),       \
      (__v8hi)(__m128i)_mm_setzero_si128(), (__mmask8)(U)))
#define _mm_mask_blend_epi16(__U, __A, __W)                               \
  ((__m128i)__builtin_ia32_blendmw_128_mask((__v8hi)(__A), (__v8hi)(__W), \
                                            (__mmask8)(__U)))
#define _mm_mask_blend_epi8(__U, __A, __W)                                  \
  ((__m128i)__builtin_ia32_blendmb_128_mask((__v16qi)(__A), (__v16qi)(__W), \
                                            (__mmask16)(__U)))
#define _mm256_mask_blend_epi16(__U, __A, __W)                              \
  ((__m256i)__builtin_ia32_blendmw_256_mask((__v16hi)(__A), (__v16hi)(__W), \
                                            (__mmask16)(__U)))
#define _mm256_mask_blend_epi8(__U, __A, __W)                               \
  ((__m256i)__builtin_ia32_blendmb_256_mask((__v32qi)(__A), (__v32qi)(__W), \
                                            (__mmask32)(__U)))
#define _mm_cmp_epi16_mask(X, Y, P)                                 \
  ((__mmask8)__builtin_ia32_cmpw128_mask(                           \
      (__v8hi)(__m128i)(X), (__v8hi)(__m128i)(Y), (int)(P), (__mmask8)(-1)))
#define _mm_cmp_epi8_mask(X, Y, P)                                         \
  ((__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(X),           \
                                          (__v16qi)(__m128i)(Y), (int)(P), \
                                          (__mmask16)(-1)))
#define _mm256_cmp_epi16_mask(X, Y, P)                                     \
  ((__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(X),           \
                                          (__v16hi)(__m256i)(Y), (int)(P), \
                                          (__mmask16)(-1)))
#define _mm256_cmp_epi8_mask(X, Y, P)                                      \
  ((__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(X),           \
                                          (__v32qi)(__m256i)(Y), (int)(P), \
                                          (__mmask32)(-1)))
#define _mm_cmp_epu16_mask(X, Y, P)                                 \
  ((__mmask8)__builtin_ia32_ucmpw128_mask(                          \
      (__v8hi)(__m128i)(X), (__v8hi)(__m128i)(Y), (int)(P), (__mmask8)(-1)))
#define _mm_cmp_epu8_mask(X, Y, P)                                          \
  ((__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(X),           \
                                           (__v16qi)(__m128i)(Y), (int)(P), \
                                           (__mmask16)(-1)))
#define _mm256_cmp_epu16_mask(X, Y, P)                                      \
  ((__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(X),           \
                                           (__v16hi)(__m256i)(Y), (int)(P), \
                                           (__mmask16)(-1)))
#define _mm256_cmp_epu8_mask(X, Y, P)                               \
  ((__mmask32)__builtin_ia32_ucmpb256_mask(                         \
      (__v32qi)(__m256i)(X), (__v32qi)(__m256i)(Y), (int)(P), (__mmask32)(-1)))
#define _mm_mask_cmp_epi16_mask(M, X, Y, P)                         \
  ((__mmask8)__builtin_ia32_cmpw128_mask(                           \
      (__v8hi)(__m128i)(X), (__v8hi)(__m128i)(Y), (int)(P), (__mmask8)(M)))
#define _mm_mask_cmp_epi8_mask(M, X, Y, P)                          \
  ((__mmask16)__builtin_ia32_cmpb128_mask(                          \
      (__v16qi)(__m128i)(X), (__v16qi)(__m128i)(Y), (int)(P), (__mmask16)(M)))
#define _mm256_mask_cmp_epi16_mask(M, X, Y, P)                      \
  ((__mmask16)__builtin_ia32_cmpw256_mask(                          \
      (__v16hi)(__m256i)(X), (__v16hi)(__m256i)(Y), (int)(P), (__mmask16)(M)))
#define _mm256_mask_cmp_epi8_mask(M, X, Y, P)                       \
  ((__mmask32)__builtin_ia32_cmpb256_mask(                          \
      (__v32qi)(__m256i)(X), (__v32qi)(__m256i)(Y), (int)(P), (__mmask32)(M)))
#define _mm_mask_cmp_epu16_mask(M, X, Y, P)                         \
  ((__mmask8)__builtin_ia32_ucmpw128_mask(                          \
      (__v8hi)(__m128i)(X), (__v8hi)(__m128i)(Y), (int)(P), (__mmask8)(M)))
#define _mm_mask_cmp_epu8_mask(M, X, Y, P)                          \
  ((__mmask16)__builtin_ia32_ucmpb128_mask(                         \
      (__v16qi)(__m128i)(X), (__v16qi)(__m128i)(Y), (int)(P), (__mmask16)(M)))
#define _mm256_mask_cmp_epu16_mask(M, X, Y, P)                      \
  ((__mmask16)__builtin_ia32_ucmpw256_mask(                         \
      (__v16hi)(__m256i)(X), (__v16hi)(__m256i)(Y), (int)(P), (__mmask16)(M)))
#define _mm256_mask_cmp_epu8_mask(M, X, Y, P)                       \
  ((__mmask32)__builtin_ia32_ucmpb256_mask(                         \
      (__v32qi)(__m256i)(X), (__v32qi)(__m256i)(Y), (int)(P), (__mmask32)(M)))
#endif

__funline __mmask32 _mm256_cmpneq_epi8_mask(__m256i __X, __m256i __Y) {
  return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__X, (__v32qi)__Y, 4,
                                                (__mmask32)-1);
}

__funline __mmask32 _mm256_cmplt_epi8_mask(__m256i __X, __m256i __Y) {
  return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__X, (__v32qi)__Y, 1,
                                                (__mmask32)-1);
}

__funline __mmask32 _mm256_cmpge_epi8_mask(__m256i __X, __m256i __Y) {
  return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__X, (__v32qi)__Y, 5,
                                                (__mmask32)-1);
}

__funline __mmask32 _mm256_cmple_epi8_mask(__m256i __X, __m256i __Y) {
  return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__X, (__v32qi)__Y, 2,
                                                (__mmask32)-1);
}

__funline __mmask16 _mm256_cmpneq_epi16_mask(__m256i __X, __m256i __Y) {
  return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__X, (__v16hi)__Y, 4,
                                                (__mmask16)-1);
}

__funline __mmask16 _mm256_cmplt_epi16_mask(__m256i __X, __m256i __Y) {
  return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__X, (__v16hi)__Y, 1,
                                                (__mmask16)-1);
}

__funline
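/*
 * The integer predicates hard-coded by these cmpneq/cmplt/cmpge/cmple
 * wrappers follow the _MM_CMPINT_* encoding taken by the cmp macros
 * above: 0 == EQ, 1 == LT, 2 == LE, 4 == NE, 5 == NLT (>=), and
 * 6 == NLE (>). So, as a sketch, _mm256_cmplt_epi8_mask(x, y) is
 * equivalent to _mm256_cmp_epi8_mask(x, y, 1).
 */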
__mmask16 _mm256_cmpge_epi16_mask(__m256i __X, __m256i __Y) { return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__X, (__v16hi)__Y, 5, (__mmask16)-1); } __funline __mmask16 _mm256_cmple_epi16_mask(__m256i __X, __m256i __Y) { return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__X, (__v16hi)__Y, 2, (__mmask16)-1); } __funline __mmask16 _mm_cmpneq_epu8_mask(__m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__X, (__v16qi)__Y, 4, (__mmask16)-1); } __funline __mmask16 _mm_cmplt_epu8_mask(__m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__X, (__v16qi)__Y, 1, (__mmask16)-1); } __funline __mmask16 _mm_cmpge_epu8_mask(__m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__X, (__v16qi)__Y, 5, (__mmask16)-1); } __funline __mmask16 _mm_cmple_epu8_mask(__m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__X, (__v16qi)__Y, 2, (__mmask16)-1); } __funline __mmask8 _mm_cmpneq_epu16_mask(__m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__X, (__v8hi)__Y, 4, (__mmask8)-1); } __funline __mmask8 _mm_cmplt_epu16_mask(__m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__X, (__v8hi)__Y, 1, (__mmask8)-1); } __funline __mmask8 _mm_cmpge_epu16_mask(__m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__X, (__v8hi)__Y, 5, (__mmask8)-1); } __funline __mmask8 _mm_cmple_epu16_mask(__m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__X, (__v8hi)__Y, 2, (__mmask8)-1); } __funline __mmask16 _mm_cmpneq_epi8_mask(__m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__X, (__v16qi)__Y, 4, (__mmask16)-1); } __funline __mmask16 _mm_cmplt_epi8_mask(__m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__X, (__v16qi)__Y, 1, (__mmask16)-1); } __funline __mmask16 _mm_cmpge_epi8_mask(__m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__X, (__v16qi)__Y, 5, (__mmask16)-1); } __funline __mmask16 _mm_cmple_epi8_mask(__m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__X, (__v16qi)__Y, 2, (__mmask16)-1); } __funline __mmask8 _mm_cmpneq_epi16_mask(__m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__X, (__v8hi)__Y, 4, (__mmask8)-1); } __funline __mmask8 _mm_cmplt_epi16_mask(__m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__X, (__v8hi)__Y, 1, (__mmask8)-1); } __funline __mmask8 _mm_cmpge_epi16_mask(__m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__X, (__v8hi)__Y, 5, (__mmask8)-1); } __funline __mmask8 _mm_cmple_epi16_mask(__m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__X, (__v8hi)__Y, 2, (__mmask8)-1); } __funline __m256i _mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_pmulhrsw256_mask((__v16hi)__X, (__v16hi)__Y, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_pmulhrsw256_mask( (__v16hi)__X, (__v16hi)__Y, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m256i _mm256_mask_mulhi_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmulhuw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline 
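/*
 * mulhi_ep[iu]16 keeps the high 16 bits of the 32-bit lane product,
 * while mulhrs_epi16 applies the PMULHRSW round-and-scale rule. A
 * scalar sketch of one mulhrs lane (illustrative only):
 *
 *   short mulhrs(short a, short b) {
 *     return (short)(((int)a * (int)b + 0x4000) >> 15);
 *   }
 */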
__m256i _mm256_maskz_mulhi_epu16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmulhuw256_mask( (__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m256i _mm256_mask_mulhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmulhw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_mulhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmulhw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_mulhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmulhw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_mulhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmulhw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_mask_mulhi_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmulhuw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_mulhi_epu16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmulhuw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_pmulhrsw128_mask((__v8hi)__X, (__v8hi)__Y, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_pmulhrsw128_mask( (__v8hi)__X, (__v8hi)__Y, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_mullo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmullw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_mullo_epi16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmullw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_mullo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmullw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_mullo_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pmullw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovsxbw256_mask((__v16qi)__A, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovsxbw256_mask( (__v16qi)__A, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovsxbw128_mask((__v16qi)__A, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovsxbw128_mask( (__v16qi)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovzxbw256_mask((__v16qi)__A, (__v16hi)__W, (__mmask16)__U); } 
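/*
 * cvtepi8_epi16 sign-extends each byte to a 16-bit lane, whereas the
 * cvtepu8_epi16 forms zero-extend. Illustrative check (not from the
 * original source): with __m128i v = _mm_set1_epi8(-1) and an all-ones
 * mask, the signed widening yields lanes of 0xFFFF while the unsigned
 * widening yields lanes of 0x00FF.
 */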
__funline __m256i _mm256_maskz_cvtepu8_epi16(__mmask16 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovzxbw256_mask( (__v16qi)__A, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovzxbw128_mask((__v16qi)__A, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovzxbw128_mask( (__v16qi)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_avg_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pavgb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)__W, (__mmask32)__U); } __funline __m256i _mm256_maskz_avg_epu8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pavgb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__U); } __funline __m128i _mm_mask_avg_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pavgb128_mask((__v16qi)__A, (__v16qi)__B, (__v16qi)__W, (__mmask16)__U); } __funline __m128i _mm_maskz_avg_epu8(__mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pavgb128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__U); } __funline __m256i _mm256_mask_avg_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pavgw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_avg_epu16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pavgw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_avg_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pavgw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_avg_epu16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pavgw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_add_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)__W, (__mmask32)__U); } __funline __m256i _mm256_maskz_add_epi8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__U); } __funline __m256i _mm256_mask_add_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_add_epi16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m256i _mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddsb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)__W, (__mmask32)__U); } __funline __m256i _mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddsb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__U); } __funline __m256i _mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddsw256_mask((__v16hi)__A, 
(__v16hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddsw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m256i _mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddusb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)__W, (__mmask32)__U); } __funline __m256i _mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddusb256_mask( (__v32qi)__A, (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__U); } __funline __m256i _mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddusw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddusw256_mask( (__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m256i _mm256_mask_sub_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)__W, (__mmask32)__U); } __funline __m256i _mm256_maskz_sub_epi8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__U); } __funline __m256i _mm256_mask_sub_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_sub_epi16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m256i _mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubsb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)__W, (__mmask32)__U); } __funline __m256i _mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubsb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__U); } __funline __m256i _mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubsw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubsw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m256i _mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubusb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)__W, (__mmask32)__U); } __funline __m256i _mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubusb256_mask( (__v32qi)__A, (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__U); } __funline __m256i _mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubusw256_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubusw256_mask( (__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), 
(__mmask16)__U); } __funline __m128i _mm_mask_add_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddb128_mask((__v16qi)__A, (__v16qi)__B, (__v16qi)__W, (__mmask16)__U); } __funline __m128i _mm_maskz_add_epi8(__mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddb128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__U); } __funline __m128i _mm_mask_add_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_add_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_unpackhi_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpckhbw256_mask( (__v32qi)__A, (__v32qi)__B, (__v32qi)__W, (__mmask32)__U); } __funline __m256i _mm256_maskz_unpackhi_epi8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpckhbw256_mask( (__v32qi)__A, (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__U); } __funline __m128i _mm_mask_unpackhi_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpckhbw128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)__W, (__mmask16)__U); } __funline __m128i _mm_maskz_unpackhi_epi8(__mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpckhbw128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__U); } __funline __m256i _mm256_mask_unpackhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpckhwd256_mask( (__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_unpackhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpckhwd256_mask( (__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_unpackhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpckhwd128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_unpackhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpckhwd128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_unpacklo_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpcklbw256_mask( (__v32qi)__A, (__v32qi)__B, (__v32qi)__W, (__mmask32)__U); } __funline __m256i _mm256_maskz_unpacklo_epi8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpcklbw256_mask( (__v32qi)__A, (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__U); } __funline __m128i _mm_mask_unpacklo_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpcklbw128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)__W, (__mmask16)__U); } __funline __m128i _mm_maskz_unpacklo_epi8(__mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpcklbw128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__U); } __funline __m256i _mm256_mask_unpacklo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpcklwd256_mask( (__v16hi)__A, (__v16hi)__B, (__v16hi)__W, 
(__mmask16)__U); } __funline __m256i _mm256_maskz_unpacklo_epi16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpcklwd256_mask( (__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_unpacklo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpcklwd128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_unpacklo_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpcklwd128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __mmask16 _mm_cmpeq_epi8_mask(__m128i __A, __m128i __B) { return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__A, (__v16qi)__B, (__mmask16)-1); } __funline __mmask16 _mm_cmpeq_epu8_mask(__m128i __A, __m128i __B) { return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__A, (__v16qi)__B, 0, (__mmask16)-1); } __funline __mmask16 _mm_mask_cmpeq_epu8_mask(__mmask16 __U, __m128i __A, __m128i __B) { return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__A, (__v16qi)__B, 0, __U); } __funline __mmask16 _mm_mask_cmpeq_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) { return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__A, (__v16qi)__B, __U); } __funline __mmask32 _mm256_cmpeq_epu8_mask(__m256i __A, __m256i __B) { return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__A, (__v32qi)__B, 0, (__mmask32)-1); } __funline __mmask32 _mm256_cmpeq_epi8_mask(__m256i __A, __m256i __B) { return (__mmask32)__builtin_ia32_pcmpeqb256_mask((__v32qi)__A, (__v32qi)__B, (__mmask32)-1); } __funline __mmask32 _mm256_mask_cmpeq_epu8_mask(__mmask32 __U, __m256i __A, __m256i __B) { return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__A, (__v32qi)__B, 0, __U); } __funline __mmask32 _mm256_mask_cmpeq_epi8_mask(__mmask32 __U, __m256i __A, __m256i __B) { return (__mmask32)__builtin_ia32_pcmpeqb256_mask((__v32qi)__A, (__v32qi)__B, __U); } __funline __mmask8 _mm_cmpeq_epu16_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__A, (__v8hi)__B, 0, (__mmask8)-1); } __funline __mmask8 _mm_cmpeq_epi16_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_pcmpeqw128_mask((__v8hi)__A, (__v8hi)__B, (__mmask8)-1); } __funline __mmask8 _mm_mask_cmpeq_epu16_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__A, (__v8hi)__B, 0, __U); } __funline __mmask8 _mm_mask_cmpeq_epi16_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_pcmpeqw128_mask((__v8hi)__A, (__v8hi)__B, __U); } __funline __mmask16 _mm256_cmpeq_epu16_mask(__m256i __A, __m256i __B) { return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__A, (__v16hi)__B, 0, (__mmask16)-1); } __funline __mmask16 _mm256_cmpeq_epi16_mask(__m256i __A, __m256i __B) { return (__mmask16)__builtin_ia32_pcmpeqw256_mask((__v16hi)__A, (__v16hi)__B, (__mmask16)-1); } __funline __mmask16 _mm256_mask_cmpeq_epu16_mask(__mmask16 __U, __m256i __A, __m256i __B) { return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__A, (__v16hi)__B, 0, __U); } __funline __mmask16 _mm256_mask_cmpeq_epi16_mask(__mmask16 __U, __m256i __A, __m256i __B) { return (__mmask16)__builtin_ia32_pcmpeqw256_mask((__v16hi)__A, (__v16hi)__B, __U); } __funline __mmask16 _mm_cmpgt_epu8_mask(__m128i __A, __m128i __B) { return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__A, (__v16qi)__B, 6, (__mmask16)-1); } __funline __mmask16 
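/*
 * These cmpeq/cmpgt helpers return a bitmask, with bit i holding the
 * outcome for lane i. One plausible use (illustrative): counting
 * equal bytes with the GCC popcount builtin,
 *
 *   __mmask16 eq = _mm_cmpeq_epi8_mask(a, b);
 *   int matches = __builtin_popcount((unsigned)eq);
 */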
_mm_cmpgt_epi8_mask(__m128i __A, __m128i __B) { return (__mmask16)__builtin_ia32_pcmpgtb128_mask((__v16qi)__A, (__v16qi)__B, (__mmask16)-1); } __funline __mmask16 _mm_mask_cmpgt_epu8_mask(__mmask16 __U, __m128i __A, __m128i __B) { return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__A, (__v16qi)__B, 6, __U); } __funline __mmask16 _mm_mask_cmpgt_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) { return (__mmask16)__builtin_ia32_pcmpgtb128_mask((__v16qi)__A, (__v16qi)__B, __U); } __funline __mmask32 _mm256_cmpgt_epu8_mask(__m256i __A, __m256i __B) { return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__A, (__v32qi)__B, 6, (__mmask32)-1); } __funline __mmask32 _mm256_cmpgt_epi8_mask(__m256i __A, __m256i __B) { return (__mmask32)__builtin_ia32_pcmpgtb256_mask((__v32qi)__A, (__v32qi)__B, (__mmask32)-1); } __funline __mmask32 _mm256_mask_cmpgt_epu8_mask(__mmask32 __U, __m256i __A, __m256i __B) { return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__A, (__v32qi)__B, 6, __U); } __funline __mmask32 _mm256_mask_cmpgt_epi8_mask(__mmask32 __U, __m256i __A, __m256i __B) { return (__mmask32)__builtin_ia32_pcmpgtb256_mask((__v32qi)__A, (__v32qi)__B, __U); } __funline __mmask8 _mm_cmpgt_epu16_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__A, (__v8hi)__B, 6, (__mmask8)-1); } __funline __mmask8 _mm_cmpgt_epi16_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_pcmpgtw128_mask((__v8hi)__A, (__v8hi)__B, (__mmask8)-1); } __funline __mmask8 _mm_mask_cmpgt_epu16_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__A, (__v8hi)__B, 6, __U); } __funline __mmask8 _mm_mask_cmpgt_epi16_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_pcmpgtw128_mask((__v8hi)__A, (__v8hi)__B, __U); } __funline __mmask16 _mm256_cmpgt_epu16_mask(__m256i __A, __m256i __B) { return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__A, (__v16hi)__B, 6, (__mmask16)-1); } __funline __mmask16 _mm256_cmpgt_epi16_mask(__m256i __A, __m256i __B) { return (__mmask16)__builtin_ia32_pcmpgtw256_mask((__v16hi)__A, (__v16hi)__B, (__mmask16)-1); } __funline __mmask16 _mm256_mask_cmpgt_epu16_mask(__mmask16 __U, __m256i __A, __m256i __B) { return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__A, (__v16hi)__B, 6, __U); } __funline __mmask16 _mm256_mask_cmpgt_epi16_mask(__mmask16 __U, __m256i __A, __m256i __B) { return (__mmask16)__builtin_ia32_pcmpgtw256_mask((__v16hi)__A, (__v16hi)__B, __U); } __funline __mmask16 _mm_testn_epi8_mask(__m128i __A, __m128i __B) { return (__mmask16)__builtin_ia32_ptestnmb128((__v16qi)__A, (__v16qi)__B, (__mmask16)-1); } __funline __mmask16 _mm_mask_testn_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) { return (__mmask16)__builtin_ia32_ptestnmb128((__v16qi)__A, (__v16qi)__B, __U); } __funline __mmask32 _mm256_testn_epi8_mask(__m256i __A, __m256i __B) { return (__mmask32)__builtin_ia32_ptestnmb256((__v32qi)__A, (__v32qi)__B, (__mmask32)-1); } __funline __mmask32 _mm256_mask_testn_epi8_mask(__mmask32 __U, __m256i __A, __m256i __B) { return (__mmask32)__builtin_ia32_ptestnmb256((__v32qi)__A, (__v32qi)__B, __U); } __funline __mmask8 _mm_testn_epi16_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ptestnmw128((__v8hi)__A, (__v8hi)__B, (__mmask8)-1); } __funline __mmask8 _mm_mask_testn_epi16_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ptestnmw128((__v8hi)__A, (__v8hi)__B, __U); } __funline __mmask16 
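/*
 * test_* sets mask bit i when (__A & __B) is nonzero in lane i;
 * testn_* is the complement, setting the bit when the AND is all
 * zero. For example (sketch), a NUL-byte scan over sixteen bytes:
 *
 *   __mmask16 nuls = _mm_testn_epi8_mask(v, v);  // 1 bits mark 0x00
 */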
_mm256_testn_epi16_mask(__m256i __A, __m256i __B) { return (__mmask16)__builtin_ia32_ptestnmw256((__v16hi)__A, (__v16hi)__B, (__mmask16)-1); } __funline __mmask16 _mm256_mask_testn_epi16_mask(__mmask16 __U, __m256i __A, __m256i __B) { return (__mmask16)__builtin_ia32_ptestnmw256((__v16hi)__A, (__v16hi)__B, __U); } __funline __m256i _mm256_mask_shuffle_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pshufb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)__W, (__mmask32)__U); } __funline __m256i _mm256_maskz_shuffle_epi8(__mmask32 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pshufb256_mask((__v32qi)__A, (__v32qi)__B, (__v32qi)_mm256_setzero_si256(), (__mmask32)__U); } __funline __m128i _mm_mask_shuffle_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pshufb128_mask((__v16qi)__A, (__v16qi)__B, (__v16qi)__W, (__mmask16)__U); } __funline __m128i _mm_maskz_shuffle_epi8(__mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pshufb128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__U); } __funline __m256i _mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_packsswb256_mask( (__v16hi)__A, (__v16hi)__B, (__v32qi)_mm256_setzero_si256(), __M); } __funline __m256i _mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_packsswb256_mask((__v16hi)__A, (__v16hi)__B, (__v32qi)__W, __M); } __funline __m128i _mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_packsswb128_mask( (__v8hi)__A, (__v8hi)__B, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_packsswb128_mask((__v8hi)__A, (__v8hi)__B, (__v16qi)__W, __M); } __funline __m256i _mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_packuswb256_mask( (__v16hi)__A, (__v16hi)__B, (__v32qi)_mm256_setzero_si256(), __M); } __funline __m256i _mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_packuswb256_mask((__v16hi)__A, (__v16hi)__B, (__v32qi)__W, __M); } __funline __m128i _mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_packuswb128_mask( (__v8hi)__A, (__v8hi)__B, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_packuswb128_mask((__v8hi)__A, (__v8hi)__B, (__v16qi)__W, __M); } __funline __m256i _mm256_mask_abs_epi8(__m256i __W, __mmask32 __U, __m256i __A) { return (__m256i)__builtin_ia32_pabsb256_mask((__v32qi)__A, (__v32qi)__W, (__mmask32)__U); } __funline __m256i _mm256_maskz_abs_epi8(__mmask32 __U, __m256i __A) { return (__m256i)__builtin_ia32_pabsb256_mask( (__v32qi)__A, (__v32qi)_mm256_setzero_si256(), (__mmask32)__U); } __funline __m128i _mm_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A) { return (__m128i)__builtin_ia32_pabsb128_mask((__v16qi)__A, (__v16qi)__W, (__mmask16)__U); } __funline __m128i _mm_maskz_abs_epi8(__mmask16 __U, __m128i __A) { return (__m128i)__builtin_ia32_pabsb128_mask( (__v16qi)__A, (__v16qi)_mm_setzero_si128(), (__mmask16)__U); } __funline __m256i _mm256_mask_abs_epi16(__m256i __W, __mmask16 __U, __m256i __A) { return 
(__m256i)__builtin_ia32_pabsw256_mask((__v16hi)__A, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_abs_epi16(__mmask16 __U, __m256i __A) { return (__m256i)__builtin_ia32_pabsw256_mask( (__v16hi)__A, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_abs_epi16(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pabsw128_mask((__v8hi)__A, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_abs_epi16(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pabsw128_mask( (__v8hi)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __mmask32 _mm256_cmpneq_epu8_mask(__m256i __X, __m256i __Y) { return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__X, (__v32qi)__Y, 4, (__mmask32)-1); } __funline __mmask32 _mm256_cmplt_epu8_mask(__m256i __X, __m256i __Y) { return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__X, (__v32qi)__Y, 1, (__mmask32)-1); } __funline __mmask32 _mm256_cmpge_epu8_mask(__m256i __X, __m256i __Y) { return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__X, (__v32qi)__Y, 5, (__mmask32)-1); } __funline __mmask32 _mm256_cmple_epu8_mask(__m256i __X, __m256i __Y) { return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__X, (__v32qi)__Y, 2, (__mmask32)-1); } __funline __mmask16 _mm256_cmpneq_epu16_mask(__m256i __X, __m256i __Y) { return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__X, (__v16hi)__Y, 4, (__mmask16)-1); } __funline __mmask16 _mm256_cmplt_epu16_mask(__m256i __X, __m256i __Y) { return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__X, (__v16hi)__Y, 1, (__mmask16)-1); } __funline __mmask16 _mm256_cmpge_epu16_mask(__m256i __X, __m256i __Y) { return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__X, (__v16hi)__Y, 5, (__mmask16)-1); } __funline __mmask16 _mm256_cmple_epu16_mask(__m256i __X, __m256i __Y) { return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__X, (__v16hi)__Y, 2, (__mmask16)-1); } __funline void _mm256_mask_storeu_epi16(void *__P, __mmask16 __U, __m256i __A) { __builtin_ia32_storedquhi256_mask((short *)__P, (__v16hi)__A, (__mmask16)__U); } __funline void _mm_mask_storeu_epi16(void *__P, __mmask8 __U, __m128i __A) { __builtin_ia32_storedquhi128_mask((short *)__P, (__v8hi)__A, (__mmask8)__U); } __funline __m128i _mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddsw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psubsb128_mask((__v16qi)__A, (__v16qi)__B, (__v16qi)__W, (__mmask16)__U); } __funline __m128i _mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psubsb128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__U); } __funline __m128i _mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psubsw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psubsw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psubusb128_mask((__v16qi)__A, (__v16qi)__B, (__v16qi)__W, (__mmask16)__U); } __funline __m128i _mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B) { 
return (__m128i)__builtin_ia32_psubusb128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__U); } __funline __m128i _mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psubusw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psubusw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_srl_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_psrlw256_mask((__v16hi)__A, (__v8hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_srl_epi16(__mmask16 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_psrlw256_mask((__v16hi)__A, (__v8hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_srl_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psrlw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_srl_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psrlw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_sra_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_psraw256_mask((__v16hi)__A, (__v8hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_sra_epi16(__mmask16 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_psraw256_mask((__v16hi)__A, (__v8hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_mask_sra_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psraw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_sra_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psraw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddsw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddusb128_mask((__v16qi)__A, (__v16qi)__B, (__v16qi)__W, (__mmask16)__U); } __funline __m128i _mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddusb128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__U); } __funline __m128i _mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddusw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddusw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_mask_sub_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psubb128_mask((__v16qi)__A, (__v16qi)__B, (__v16qi)__W, (__mmask16)__U); } __funline __m128i _mm_maskz_sub_epi8(__mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psubb128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__U); } __funline __m128i 
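/*
 * The adds/subs forms saturate instead of wrapping: epi8/epi16 clamp
 * to [-128, 127] / [-32768, 32767] and epu8/epu16 clamp to [0, 255] /
 * [0, 65535]. Hypothetical example:
 *
 *   __m128i a = _mm_set1_epi8(120), b = _mm_set1_epi8(30);
 *   __m128i s = _mm_mask_adds_epi8(a, m, a, b);  // 127 under m, else 120
 */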
_mm_mask_sub_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psubw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_sub_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psubw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddsb128_mask((__v16qi)__A, (__v16qi)__B, (__v16qi)__W, (__mmask16)__U); } __funline __m128i _mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddsb128_mask( (__v16qi)__A, (__v16qi)__B, (__v16qi)_mm_setzero_si128(), (__mmask16)__U); } __funline __m128i _mm_cvtepi16_epi8(__m128i __A) { return (__m128i)__builtin_ia32_pmovwb128_mask( (__v8hi)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtepi16_storeu_epi8(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovwb128mem_mask((__v8qi *)__P, (__v8hi)__A, __M); } __funline __m128i _mm_mask_cvtepi16_epi8(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovwb128_mask((__v8hi)__A, (__v16qi)__O, __M); } __funline __m128i _mm_maskz_cvtepi16_epi8(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovwb128_mask( (__v8hi)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m256i _mm256_srav_epi16(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psrav16hi_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)-1); } __funline __m256i _mm256_mask_srav_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psrav16hi_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_srav_epi16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psrav16hi_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_srav_epi16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psrav8hi_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm_mask_srav_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psrav8hi_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_srav_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psrav8hi_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_srlv_epi16(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psrlv16hi_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)-1); } __funline __m256i _mm256_mask_srlv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psrlv16hi_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_srlv_epi16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psrlv16hi_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_srlv_epi16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psrlv8hi_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm_mask_srlv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psrlv8hi_mask((__v8hi)__A, (__v8hi)__B, 
(__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_srlv_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psrlv8hi_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_sllv_epi16(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psllv16hi_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)-1); } __funline __m256i _mm256_mask_sllv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psllv16hi_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_sllv_epi16(__mmask16 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psllv16hi_mask((__v16hi)__A, (__v16hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_sllv_epi16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psllv8hi_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm_mask_sllv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psllv8hi_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_sllv_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psllv8hi_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_mask_sll_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psllw128_mask((__v8hi)__A, (__v8hi)__B, (__v8hi)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_sll_epi16(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psllw128_mask( (__v8hi)__A, (__v8hi)__B, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_sll_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_psllw256_mask((__v16hi)__A, (__v8hi)__B, (__v16hi)__W, (__mmask16)__U); } __funline __m256i _mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_psllw256_mask((__v16hi)__A, (__v8hi)__B, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m256i _mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_packusdw256_mask( (__v8si)__A, (__v8si)__B, (__v16hi)_mm256_setzero_si256(), __M); } __funline __m256i _mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_packusdw256_mask((__v8si)__A, (__v8si)__B, (__v16hi)__W, __M); } __funline __m128i _mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_packusdw128_mask( (__v4si)__A, (__v4si)__B, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm_mask_packus_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_packusdw128_mask((__v4si)__A, (__v4si)__B, (__v8hi)__W, __M); } __funline __m256i _mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_packssdw256_mask( (__v8si)__A, (__v8si)__B, (__v16hi)_mm256_setzero_si256(), __M); } __funline __m256i _mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_packssdw256_mask((__v8si)__A, (__v8si)__B, (__v16hi)__W, __M); } __funline __m128i _mm_maskz_packs_epi32(__mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_packssdw128_mask( (__v4si)__A, (__v4si)__B, 
(__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm_mask_packs_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_packssdw128_mask((__v4si)__A, (__v4si)__B, (__v8hi)__W, __M); } __funline __mmask16 _mm_mask_cmpneq_epu8_mask(__mmask16 __M, __m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__X, (__v16qi)__Y, 4, (__mmask16)__M); } __funline __mmask16 _mm_mask_cmplt_epu8_mask(__mmask16 __M, __m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__X, (__v16qi)__Y, 1, (__mmask16)__M); } __funline __mmask16 _mm_mask_cmpge_epu8_mask(__mmask16 __M, __m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__X, (__v16qi)__Y, 5, (__mmask16)__M); } __funline __mmask16 _mm_mask_cmple_epu8_mask(__mmask16 __M, __m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__X, (__v16qi)__Y, 2, (__mmask16)__M); } __funline __mmask8 _mm_mask_cmpneq_epu16_mask(__mmask8 __M, __m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__X, (__v8hi)__Y, 4, (__mmask8)__M); } __funline __mmask8 _mm_mask_cmplt_epu16_mask(__mmask8 __M, __m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__X, (__v8hi)__Y, 1, (__mmask8)__M); } __funline __mmask8 _mm_mask_cmpge_epu16_mask(__mmask8 __M, __m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__X, (__v8hi)__Y, 5, (__mmask8)__M); } __funline __mmask8 _mm_mask_cmple_epu16_mask(__mmask8 __M, __m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__X, (__v8hi)__Y, 2, (__mmask8)__M); } __funline __mmask16 _mm_mask_cmpneq_epi8_mask(__mmask16 __M, __m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__X, (__v16qi)__Y, 4, (__mmask16)__M); } __funline __mmask16 _mm_mask_cmplt_epi8_mask(__mmask16 __M, __m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__X, (__v16qi)__Y, 1, (__mmask16)__M); } __funline __mmask16 _mm_mask_cmpge_epi8_mask(__mmask16 __M, __m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__X, (__v16qi)__Y, 5, (__mmask16)__M); } __funline __mmask16 _mm_mask_cmple_epi8_mask(__mmask16 __M, __m128i __X, __m128i __Y) { return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__X, (__v16qi)__Y, 2, (__mmask16)__M); } __funline __mmask8 _mm_mask_cmpneq_epi16_mask(__mmask8 __M, __m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__X, (__v8hi)__Y, 4, (__mmask8)__M); } __funline __mmask8 _mm_mask_cmplt_epi16_mask(__mmask8 __M, __m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__X, (__v8hi)__Y, 1, (__mmask8)__M); } __funline __mmask8 _mm_mask_cmpge_epi16_mask(__mmask8 __M, __m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__X, (__v8hi)__Y, 5, (__mmask8)__M); } __funline __mmask8 _mm_mask_cmple_epi16_mask(__mmask8 __M, __m128i __X, __m128i __Y) { return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__X, (__v8hi)__Y, 2, (__mmask8)__M); } __funline __mmask32 _mm256_mask_cmpneq_epu8_mask(__mmask32 __M, __m256i __X, __m256i __Y) { return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__X, (__v32qi)__Y, 4, (__mmask32)__M); } __funline __mmask32 _mm256_mask_cmplt_epu8_mask(__mmask32 __M, __m256i __X, __m256i __Y) { return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__X, (__v32qi)__Y, 1, (__mmask32)__M); } __funline __mmask32 
_mm256_mask_cmpge_epu8_mask(__mmask32 __M, __m256i __X, __m256i __Y) { return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__X, (__v32qi)__Y, 5, (__mmask32)__M); } __funline __mmask32 _mm256_mask_cmple_epu8_mask(__mmask32 __M, __m256i __X, __m256i __Y) { return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__X, (__v32qi)__Y, 2, (__mmask32)__M); } __funline __mmask16 _mm256_mask_cmpneq_epu16_mask(__mmask16 __M, __m256i __X, __m256i __Y) { return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__X, (__v16hi)__Y, 4, (__mmask16)__M); } __funline __mmask16 _mm256_mask_cmplt_epu16_mask(__mmask16 __M, __m256i __X, __m256i __Y) { return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__X, (__v16hi)__Y, 1, (__mmask16)__M); } __funline __mmask16 _mm256_mask_cmpge_epu16_mask(__mmask16 __M, __m256i __X, __m256i __Y) { return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__X, (__v16hi)__Y, 5, (__mmask16)__M); } __funline __mmask16 _mm256_mask_cmple_epu16_mask(__mmask16 __M, __m256i __X, __m256i __Y) { return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__X, (__v16hi)__Y, 2, (__mmask16)__M); } __funline __mmask32 _mm256_mask_cmpneq_epi8_mask(__mmask32 __M, __m256i __X, __m256i __Y) { return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__X, (__v32qi)__Y, 4, (__mmask32)__M); } __funline __mmask32 _mm256_mask_cmplt_epi8_mask(__mmask32 __M, __m256i __X, __m256i __Y) { return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__X, (__v32qi)__Y, 1, (__mmask32)__M); } __funline __mmask32 _mm256_mask_cmpge_epi8_mask(__mmask32 __M, __m256i __X, __m256i __Y) { return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__X, (__v32qi)__Y, 5, (__mmask32)__M); } __funline __mmask32 _mm256_mask_cmple_epi8_mask(__mmask32 __M, __m256i __X, __m256i __Y) { return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__X, (__v32qi)__Y, 2, (__mmask32)__M); } __funline __mmask16 _mm256_mask_cmpneq_epi16_mask(__mmask16 __M, __m256i __X, __m256i __Y) { return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__X, (__v16hi)__Y, 4, (__mmask16)__M); } __funline __mmask16 _mm256_mask_cmplt_epi16_mask(__mmask16 __M, __m256i __X, __m256i __Y) { return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__X, (__v16hi)__Y, 1, (__mmask16)__M); } __funline __mmask16 _mm256_mask_cmpge_epi16_mask(__mmask16 __M, __m256i __X, __m256i __Y) { return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__X, (__v16hi)__Y, 5, (__mmask16)__M); } __funline __mmask16 _mm256_mask_cmple_epi16_mask(__mmask16 __M, __m256i __X, __m256i __Y) { return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__X, (__v16hi)__Y, 2, (__mmask16)__M); } #ifdef __DISABLE_AVX512VLBW__ #undef __DISABLE_AVX512VLBW__ #pragma GCC pop_options #endif /* __DISABLE_AVX512VLBW__ */ #endif /* _AVX512VLBWINTRIN_H_INCLUDED */
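/* Usage sketch (hypothetical, not part of the header above): how the write-masked and zero-masked forms differ. For any op, _mm_mask_op(src, k, a, b) computes op(a, b) in lanes whose bit of k is 1 and keeps the lane of src where it is 0, while _mm_maskz_op(k, a, b) zeroes the unselected lanes instead. A minimal sketch, assuming an AVX512BW+AVX512VL CPU and compilation with -mavx512bw -mavx512vl. */ #include <immintrin.h> static void mask_demo(void) { __m128i a = _mm_set1_epi16(100); __m128i b = _mm_set1_epi16(30); __m128i src = _mm_set1_epi16(-1); __mmask8 k = 0x0F; /* select the low four 16-bit lanes */ __m128i merged = _mm_mask_sub_epi16(src, k, a, b); /* {70,70,70,70,-1,-1,-1,-1} */ __m128i zeroed = _mm_maskz_sub_epi16(k, a, b); /* {70,70,70,70,0,0,0,0} */ __mmask8 lt = _mm_mask_cmplt_epi16_mask(k, b, a); /* 0x0F: b < a, tested only in lanes 0-3 */ (void)merged; (void)zeroed; (void)lt; }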
122,253
2,741
jart/cosmopolitan
false
cosmopolitan/third_party/intel/xopintrin.internal.h
#ifndef _X86INTRIN_H_INCLUDED #error "Never use <xopintrin.h> directly; include <x86intrin.h> instead." #endif #ifndef _XOPMMINTRIN_H_INCLUDED #define _XOPMMINTRIN_H_INCLUDED #include "third_party/intel/fma4intrin.internal.h" #ifndef __XOP__ #pragma GCC push_options #pragma GCC target("xop") #define __DISABLE_XOP__ #endif /* __XOP__ */ __funline __m128i _mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpmacssww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C); } __funline __m128i _mm_macc_epi16(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpmacsww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C); } __funline __m128i _mm_maccsd_epi16(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpmacsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C); } __funline __m128i _mm_maccd_epi16(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpmacswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C); } __funline __m128i _mm_maccs_epi32(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpmacssdd((__v4si)__A, (__v4si)__B, (__v4si)__C); } __funline __m128i _mm_macc_epi32(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpmacsdd((__v4si)__A, (__v4si)__B, (__v4si)__C); } __funline __m128i _mm_maccslo_epi32(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpmacssdql((__v4si)__A, (__v4si)__B, (__v2di)__C); } __funline __m128i _mm_macclo_epi32(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpmacsdql((__v4si)__A, (__v4si)__B, (__v2di)__C); } __funline __m128i _mm_maccshi_epi32(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpmacssdqh((__v4si)__A, (__v4si)__B, (__v2di)__C); } __funline __m128i _mm_macchi_epi32(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpmacsdqh((__v4si)__A, (__v4si)__B, (__v2di)__C); } __funline __m128i _mm_maddsd_epi16(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpmadcsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C); } __funline __m128i _mm_maddd_epi16(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpmadcswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C); } /* Packed Integer Horizontal Add and Subtract */ __funline __m128i _mm_haddw_epi8(__m128i __A) { return (__m128i)__builtin_ia32_vphaddbw((__v16qi)__A); } __funline __m128i _mm_haddd_epi8(__m128i __A) { return (__m128i)__builtin_ia32_vphaddbd((__v16qi)__A); } __funline __m128i _mm_haddq_epi8(__m128i __A) { return (__m128i)__builtin_ia32_vphaddbq((__v16qi)__A); } __funline __m128i _mm_haddd_epi16(__m128i __A) { return (__m128i)__builtin_ia32_vphaddwd((__v8hi)__A); } __funline __m128i _mm_haddq_epi16(__m128i __A) { return (__m128i)__builtin_ia32_vphaddwq((__v8hi)__A); } __funline __m128i _mm_haddq_epi32(__m128i __A) { return (__m128i)__builtin_ia32_vphadddq((__v4si)__A); } __funline __m128i _mm_haddw_epu8(__m128i __A) { return (__m128i)__builtin_ia32_vphaddubw((__v16qi)__A); } __funline __m128i _mm_haddd_epu8(__m128i __A) { return (__m128i)__builtin_ia32_vphaddubd((__v16qi)__A); } __funline __m128i _mm_haddq_epu8(__m128i __A) { return (__m128i)__builtin_ia32_vphaddubq((__v16qi)__A); } __funline __m128i _mm_haddd_epu16(__m128i __A) { return (__m128i)__builtin_ia32_vphadduwd((__v8hi)__A); } __funline __m128i _mm_haddq_epu16(__m128i __A) { return (__m128i)__builtin_ia32_vphadduwq((__v8hi)__A); } __funline __m128i _mm_haddq_epu32(__m128i __A) { return (__m128i)__builtin_ia32_vphaddudq((__v4si)__A); } 
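/* Usage sketch (hypothetical, not part of the header): the XOP horizontal adds above widen as they sum, so adjacent elements can be reduced without overflow. _mm_haddd_epi16 turns eight signed 16-bit lanes into four signed 32-bit pairwise sums. A minimal sketch, assuming an XOP-capable CPU (AMD Bulldozer family) and compilation with -mxop. */ #include <x86intrin.h> static __m128i sum_adjacent_words(__m128i v) { /* {a,b,c,d,e,f,g,h} (epi16) -> {a+b, c+d, e+f, g+h} (epi32) */ return _mm_haddd_epi16(v); }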
__funline __m128i _mm_hsubw_epi8(__m128i __A) { return (__m128i)__builtin_ia32_vphsubbw((__v16qi)__A); } __funline __m128i _mm_hsubd_epi16(__m128i __A) { return (__m128i)__builtin_ia32_vphsubwd((__v8hi)__A); } __funline __m128i _mm_hsubq_epi32(__m128i __A) { return (__m128i)__builtin_ia32_vphsubdq((__v4si)__A); } /* Vector conditional move and permute */ __funline __m128i _mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpcmov(__A, __B, __C); } __funline __m128i _mm_perm_epi8(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_vpperm((__v16qi)__A, (__v16qi)__B, (__v16qi)__C); } /* Packed Integer Rotates and Shifts Rotates - Non-Immediate form */ __funline __m128i _mm_rot_epi8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vprotb((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_rot_epi16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vprotw((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_rot_epi32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vprotd((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_rot_epi64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vprotq((__v2di)__A, (__v2di)__B); } #ifdef __OPTIMIZE__ __funline __m128i _mm_roti_epi8(__m128i __A, const int __B) { return (__m128i)__builtin_ia32_vprotbi((__v16qi)__A, __B); } __funline __m128i _mm_roti_epi16(__m128i __A, const int __B) { return (__m128i)__builtin_ia32_vprotwi((__v8hi)__A, __B); } __funline __m128i _mm_roti_epi32(__m128i __A, const int __B) { return (__m128i)__builtin_ia32_vprotdi((__v4si)__A, __B); } __funline __m128i _mm_roti_epi64(__m128i __A, const int __B) { return (__m128i)__builtin_ia32_vprotqi((__v2di)__A, __B); } #else #define _mm_roti_epi8(A, N) \ ((__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (int)(N))) #define _mm_roti_epi16(A, N) \ ((__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (int)(N))) #define _mm_roti_epi32(A, N) \ ((__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (int)(N))) #define _mm_roti_epi64(A, N) \ ((__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (int)(N))) #endif __funline __m128i _mm_shl_epi8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpshlb((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_shl_epi16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpshlw((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_shl_epi32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpshld((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_shl_epi64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpshlq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_sha_epi8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpshab((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_sha_epi16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpshaw((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_sha_epi32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpshad((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_sha_epi64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpshaq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comlt_epu8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomltub((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_comle_epu8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomleub((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_comgt_epu8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomgtub((__v16qi)__A, (__v16qi)__B); } 
__funline __m128i _mm_comge_epu8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomgeub((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_comeq_epu8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomequb((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_comneq_epu8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomnequb((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_comfalse_epu8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomfalseub((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_comtrue_epu8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomtrueub((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_comlt_epu16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomltuw((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comle_epu16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomleuw((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comgt_epu16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomgtuw((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comge_epu16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomgeuw((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comeq_epu16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomequw((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comneq_epu16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomnequw((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comfalse_epu16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomfalseuw((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comtrue_epu16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomtrueuw((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comlt_epu32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomltud((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_comle_epu32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomleud((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_comgt_epu32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomgtud((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_comge_epu32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomgeud((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_comeq_epu32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomequd((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_comneq_epu32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomnequd((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_comfalse_epu32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomfalseud((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_comtrue_epu32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomtrueud((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_comlt_epu64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomltuq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comle_epu64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomleuq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comgt_epu64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomgtuq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comge_epu64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomgeuq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comeq_epu64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomequq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comneq_epu64(__m128i __A, __m128i __B) { return 
(__m128i)__builtin_ia32_vpcomnequq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comfalse_epu64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomfalseuq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comtrue_epu64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomtrueuq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comlt_epi8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomltb((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_comle_epi8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomleb((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_comgt_epi8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomgtb((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_comge_epi8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomgeb((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_comeq_epi8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomeqb((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_comneq_epi8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomneqb((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_comfalse_epi8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomfalseb((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_comtrue_epi8(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomtrueb((__v16qi)__A, (__v16qi)__B); } __funline __m128i _mm_comlt_epi16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomltw((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comle_epi16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomlew((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comgt_epi16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomgtw((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comge_epi16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomgew((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comeq_epi16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomeqw((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comneq_epi16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomneqw((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comfalse_epi16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomfalsew((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comtrue_epi16(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomtruew((__v8hi)__A, (__v8hi)__B); } __funline __m128i _mm_comlt_epi32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomltd((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_comle_epi32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomled((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_comgt_epi32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomgtd((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_comge_epi32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomged((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_comeq_epi32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomeqd((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_comneq_epi32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomneqd((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_comfalse_epi32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomfalsed((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_comtrue_epi32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomtrued((__v4si)__A, (__v4si)__B); } __funline __m128i 
_mm_comlt_epi64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomltq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comle_epi64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomleq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comgt_epi64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomgtq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comge_epi64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomgeq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comeq_epi64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomeqq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comneq_epi64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomneqq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comfalse_epi64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomfalseq((__v2di)__A, (__v2di)__B); } __funline __m128i _mm_comtrue_epi64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_vpcomtrueq((__v2di)__A, (__v2di)__B); } __funline __m128 _mm_frcz_ps(__m128 __A) { return (__m128)__builtin_ia32_vfrczps((__v4sf)__A); } __funline __m128d _mm_frcz_pd(__m128d __A) { return (__m128d)__builtin_ia32_vfrczpd((__v2df)__A); } __funline __m128 _mm_frcz_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_movss( (__v4sf)__A, (__v4sf)__builtin_ia32_vfrczss((__v4sf)__B)); } __funline __m128d _mm_frcz_sd(__m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_movsd( (__v2df)__A, (__v2df)__builtin_ia32_vfrczsd((__v2df)__B)); } __funline __m256 _mm256_frcz_ps(__m256 __A) { return (__m256)__builtin_ia32_vfrczps256((__v8sf)__A); } __funline __m256d _mm256_frcz_pd(__m256d __A) { return (__m256d)__builtin_ia32_vfrczpd256((__v4df)__A); } #ifdef __OPTIMIZE__ __funline __m128d _mm_permute2_pd(__m128d __X, __m128d __Y, __m128i __C, const int __I) { return (__m128d)__builtin_ia32_vpermil2pd((__v2df)__X, (__v2df)__Y, (__v2di)__C, __I); } __funline __m256d _mm256_permute2_pd(__m256d __X, __m256d __Y, __m256i __C, const int __I) { return (__m256d)__builtin_ia32_vpermil2pd256((__v4df)__X, (__v4df)__Y, (__v4di)__C, __I); } __funline __m128 _mm_permute2_ps(__m128 __X, __m128 __Y, __m128i __C, const int __I) { return (__m128)__builtin_ia32_vpermil2ps((__v4sf)__X, (__v4sf)__Y, (__v4si)__C, __I); } __funline __m256 _mm256_permute2_ps(__m256 __X, __m256 __Y, __m256i __C, const int __I) { return (__m256)__builtin_ia32_vpermil2ps256((__v8sf)__X, (__v8sf)__Y, (__v8si)__C, __I); } #else #define _mm_permute2_pd(X, Y, C, I) \ ((__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \ (__v2df)(__m128d)(Y), \ (__v2di)(__m128d)(C), (int)(I))) #define _mm256_permute2_pd(X, Y, C, I) \ ((__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \ (__v4df)(__m256d)(Y), \ (__v4di)(__m256d)(C), (int)(I))) #define _mm_permute2_ps(X, Y, C, I) \ ((__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \ (__v4si)(__m128)(C), (int)(I))) #define _mm256_permute2_ps(X, Y, C, I) \ ((__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \ (__v8sf)(__m256)(Y), \ (__v8si)(__m256)(C), (int)(I))) #endif /* __OPTIMIZE__ */ #ifdef __DISABLE_XOP__ #undef __DISABLE_XOP__ #pragma GCC pop_options #endif /* __DISABLE_XOP__ */ #endif /* _XOPMMINTRIN_H_INCLUDED */
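/* Usage sketch (hypothetical, not part of the header): the XOP rotate and conditional-move intrinsics above. _mm_roti_epi32 rotates each 32-bit lane left by an immediate, and _mm_cmov_si128 performs a bitwise select, (A & C) | (B & ~C). A minimal sketch, assuming an XOP-capable CPU and compilation with -mxop. */ #include <x86intrin.h> static __m128i rotl32_by_8(__m128i v) { return _mm_roti_epi32(v, 8); /* per-lane rotate left by 8 bits */ } static __m128i bitwise_select(__m128i a, __m128i b, __m128i mask) { return _mm_cmov_si128(a, b, mask); /* bits of a where mask is 1, else bits of b */ }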
19,147
559
jart/cosmopolitan
false
cosmopolitan/third_party/intel/smmintrin.internal.h
#ifndef _SMMINTRIN_H_INCLUDED #define _SMMINTRIN_H_INCLUDED #ifdef __x86_64__ #include "third_party/intel/tmmintrin.internal.h" #ifndef __SSE4_1__ #pragma GCC push_options #pragma GCC target("sse4.1") #define __DISABLE_SSE4_1__ #endif /* __SSE4_1__ */ #define _MM_FROUND_TO_NEAREST_INT 0x00 #define _MM_FROUND_TO_NEG_INF 0x01 #define _MM_FROUND_TO_POS_INF 0x02 #define _MM_FROUND_TO_ZERO 0x03 #define _MM_FROUND_CUR_DIRECTION 0x04 #define _MM_FROUND_RAISE_EXC 0x00 #define _MM_FROUND_NO_EXC 0x08 #define _MM_FROUND_NINT (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC) #define _MM_FROUND_FLOOR (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC) #define _MM_FROUND_CEIL (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC) #define _MM_FROUND_TRUNC (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC) #define _MM_FROUND_RINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC) #define _MM_FROUND_NEARBYINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC) __funline int _mm_testz_si128(__m128i __M, __m128i __V) { return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V); } __funline int _mm_testc_si128(__m128i __M, __m128i __V) { return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V); } __funline int _mm_testnzc_si128(__m128i __M, __m128i __V) { return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V); } #define _mm_test_all_zeros(M, V) _mm_testz_si128((M), (V)) #define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V))) #define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V)) #ifdef __OPTIMIZE__ __funline __m128d _mm_round_pd(__m128d __V, const int __M) { return (__m128d)__builtin_ia32_roundpd((__v2df)__V, __M); } __funline __m128d _mm_round_sd(__m128d __D, __m128d __V, const int __M) { return (__m128d)__builtin_ia32_roundsd((__v2df)__D, (__v2df)__V, __M); } #else #define _mm_round_pd(V, M) \ ((__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(V), (int)(M))) #define _mm_round_sd(D, V, M) \ ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(D), (__v2df)(__m128d)(V), \ (int)(M))) #endif #ifdef __OPTIMIZE__ __funline __m128 _mm_round_ps(__m128 __V, const int __M) { return (__m128)__builtin_ia32_roundps((__v4sf)__V, __M); } __funline __m128 _mm_round_ss(__m128 __D, __m128 __V, const int __M) { return (__m128)__builtin_ia32_roundss((__v4sf)__D, (__v4sf)__V, __M); } #else #define _mm_round_ps(V, M) \ ((__m128)__builtin_ia32_roundps((__v4sf)(__m128)(V), (int)(M))) #define _mm_round_ss(D, V, M) \ ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(D), (__v4sf)(__m128)(V), \ (int)(M))) #endif #define _mm_ceil_pd(V) _mm_round_pd((V), _MM_FROUND_CEIL) #define _mm_ceil_sd(D, V) _mm_round_sd((D), (V), _MM_FROUND_CEIL) #define _mm_floor_pd(V) _mm_round_pd((V), _MM_FROUND_FLOOR) #define _mm_floor_sd(D, V) _mm_round_sd((D), (V), _MM_FROUND_FLOOR) #define _mm_ceil_ps(V) _mm_round_ps((V), _MM_FROUND_CEIL) #define _mm_ceil_ss(D, V) _mm_round_ss((D), (V), _MM_FROUND_CEIL) #define _mm_floor_ps(V) _mm_round_ps((V), _MM_FROUND_FLOOR) #define _mm_floor_ss(D, V) _mm_round_ss((D), (V), _MM_FROUND_FLOOR) #ifdef __OPTIMIZE__ __funline __m128i _mm_blend_epi16(__m128i __X, __m128i __Y, const int __M) { return (__m128i)__builtin_ia32_pblendw128((__v8hi)__X, (__v8hi)__Y, __M); } #else #define _mm_blend_epi16(X, Y, M) \ ((__m128i)__builtin_ia32_pblendw128((__v8hi)(__m128i)(X), \ (__v8hi)(__m128i)(Y), (int)(M))) #endif __funline __m128i _mm_blendv_epi8(__m128i __X, __m128i __Y, __m128i __M) { return (__m128i)__builtin_ia32_pblendvb128((__v16qi)__X, (__v16qi)__Y, (__v16qi)__M); } #ifdef __OPTIMIZE__ __funline __m128 
_mm_blend_ps(__m128 __X, __m128 __Y, const int __M) { return (__m128)__builtin_ia32_blendps((__v4sf)__X, (__v4sf)__Y, __M); } #else #define _mm_blend_ps(X, Y, M) \ ((__m128)__builtin_ia32_blendps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \ (int)(M))) #endif __funline __m128 _mm_blendv_ps(__m128 __X, __m128 __Y, __m128 __M) { return (__m128)__builtin_ia32_blendvps((__v4sf)__X, (__v4sf)__Y, (__v4sf)__M); } #ifdef __OPTIMIZE__ __funline __m128d _mm_blend_pd(__m128d __X, __m128d __Y, const int __M) { return (__m128d)__builtin_ia32_blendpd((__v2df)__X, (__v2df)__Y, __M); } #else #define _mm_blend_pd(X, Y, M) \ ((__m128d)__builtin_ia32_blendpd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \ (int)(M))) #endif __funline __m128d _mm_blendv_pd(__m128d __X, __m128d __Y, __m128d __M) { return (__m128d)__builtin_ia32_blendvpd((__v2df)__X, (__v2df)__Y, (__v2df)__M); } #ifdef __OPTIMIZE__ __funline __m128 _mm_dp_ps(__m128 __X, __m128 __Y, const int __M) { return (__m128)__builtin_ia32_dpps((__v4sf)__X, (__v4sf)__Y, __M); } __funline __m128d _mm_dp_pd(__m128d __X, __m128d __Y, const int __M) { return (__m128d)__builtin_ia32_dppd((__v2df)__X, (__v2df)__Y, __M); } #else #define _mm_dp_ps(X, Y, M) \ ((__m128)__builtin_ia32_dpps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \ (int)(M))) #define _mm_dp_pd(X, Y, M) \ ((__m128d)__builtin_ia32_dppd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \ (int)(M))) #endif __funline __m128i _mm_cmpeq_epi64(__m128i __X, __m128i __Y) { return (__m128i)((__v2di)__X == (__v2di)__Y); } __funline __m128i _mm_min_epi8(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_pminsb128((__v16qi)__X, (__v16qi)__Y); } __funline __m128i _mm_max_epi8(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_pmaxsb128((__v16qi)__X, (__v16qi)__Y); } __funline __m128i _mm_min_epu16(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_pminuw128((__v8hi)__X, (__v8hi)__Y); } __funline __m128i _mm_max_epu16(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_pmaxuw128((__v8hi)__X, (__v8hi)__Y); } __funline __m128i _mm_min_epi32(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_pminsd128((__v4si)__X, (__v4si)__Y); } __funline __m128i _mm_max_epi32(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_pmaxsd128((__v4si)__X, (__v4si)__Y); } __funline __m128i _mm_min_epu32(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_pminud128((__v4si)__X, (__v4si)__Y); } __funline __m128i _mm_max_epu32(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_pmaxud128((__v4si)__X, (__v4si)__Y); } __funline __m128i _mm_mullo_epi32(__m128i __X, __m128i __Y) { return (__m128i)((__v4su)__X * (__v4su)__Y); } __funline __m128i _mm_mul_epi32(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_pmuldq128((__v4si)__X, (__v4si)__Y); } #ifdef __OPTIMIZE__ __funline __m128 _mm_insert_ps(__m128 __D, __m128 __S, const int __N) { return (__m128)__builtin_ia32_insertps128((__v4sf)__D, (__v4sf)__S, __N); } #else #define _mm_insert_ps(D, S, N) \ ((__m128)__builtin_ia32_insertps128((__v4sf)(__m128)(D), \ (__v4sf)(__m128)(S), (int)(N))) #endif #define _MM_MK_INSERTPS_NDX(S, D, M) (((S) << 6) | ((D) << 4) | (M)) #ifdef __OPTIMIZE__ __funline int _mm_extract_ps(__m128 __X, const int __N) { union { int i; float f; } __tmp; __tmp.f = __builtin_ia32_vec_ext_v4sf((__v4sf)__X, __N); return __tmp.i; } #else #define _mm_extract_ps(X, N) \ (__extension__({ \ union { \ int i; \ float f; \ } __tmp; \ __tmp.f = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); \ __tmp.i; \ })) #endif #define 
_MM_EXTRACT_FLOAT(D, S, N) \ { (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(S), (N)); } #define _MM_PICK_OUT_PS(X, N) \ _mm_insert_ps(_mm_setzero_ps(), (X), _MM_MK_INSERTPS_NDX((N), 0, 0x0e)) #ifdef __OPTIMIZE__ __funline __m128i _mm_insert_epi8(__m128i __D, int __S, const int __N) { return (__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)__D, __S, __N); } __funline __m128i _mm_insert_epi32(__m128i __D, int __S, const int __N) { return (__m128i)__builtin_ia32_vec_set_v4si((__v4si)__D, __S, __N); } #ifdef __x86_64__ __funline __m128i _mm_insert_epi64(__m128i __D, long long __S, const int __N) { return (__m128i)__builtin_ia32_vec_set_v2di((__v2di)__D, __S, __N); } #endif #else #define _mm_insert_epi8(D, S, N) \ ((__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(D), (int)(S), \ (int)(N))) #define _mm_insert_epi32(D, S, N) \ ((__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(D), (int)(S), \ (int)(N))) #ifdef __x86_64__ #define _mm_insert_epi64(D, S, N) \ ((__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(D), (long long)(S), \ (int)(N))) #endif #endif #ifdef __OPTIMIZE__ __funline int _mm_extract_epi8(__m128i __X, const int __N) { return (unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)__X, __N); } __funline int _mm_extract_epi32(__m128i __X, const int __N) { return __builtin_ia32_vec_ext_v4si((__v4si)__X, __N); } #ifdef __x86_64__ __funline long long _mm_extract_epi64(__m128i __X, const int __N) { return __builtin_ia32_vec_ext_v2di((__v2di)__X, __N); } #endif #else #define _mm_extract_epi8(X, N) \ ((int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X), \ (int)(N))) #define _mm_extract_epi32(X, N) \ ((int)__builtin_ia32_vec_ext_v4si((__v4si)(__m128i)(X), (int)(N))) #ifdef __x86_64__ #define _mm_extract_epi64(X, N) \ ((long long)__builtin_ia32_vec_ext_v2di((__v2di)(__m128i)(X), (int)(N))) #endif #endif __funline __m128i _mm_minpos_epu16(__m128i __X) { return (__m128i)__builtin_ia32_phminposuw128((__v8hi)__X); } __funline __m128i _mm_cvtepi8_epi32(__m128i __X) { return (__m128i)__builtin_ia32_pmovsxbd128((__v16qi)__X); } __funline __m128i _mm_cvtepi16_epi32(__m128i __X) { return (__m128i)__builtin_ia32_pmovsxwd128((__v8hi)__X); } __funline __m128i _mm_cvtepi8_epi64(__m128i __X) { return (__m128i)__builtin_ia32_pmovsxbq128((__v16qi)__X); } __funline __m128i _mm_cvtepi32_epi64(__m128i __X) { return (__m128i)__builtin_ia32_pmovsxdq128((__v4si)__X); } __funline __m128i _mm_cvtepi16_epi64(__m128i __X) { return (__m128i)__builtin_ia32_pmovsxwq128((__v8hi)__X); } __funline __m128i _mm_cvtepi8_epi16(__m128i __X) { return (__m128i)__builtin_ia32_pmovsxbw128((__v16qi)__X); } __funline __m128i _mm_cvtepu8_epi32(__m128i __X) { return (__m128i)__builtin_ia32_pmovzxbd128((__v16qi)__X); } __funline __m128i _mm_cvtepu16_epi32(__m128i __X) { return (__m128i)__builtin_ia32_pmovzxwd128((__v8hi)__X); } __funline __m128i _mm_cvtepu8_epi64(__m128i __X) { return (__m128i)__builtin_ia32_pmovzxbq128((__v16qi)__X); } __funline __m128i _mm_cvtepu32_epi64(__m128i __X) { return (__m128i)__builtin_ia32_pmovzxdq128((__v4si)__X); } __funline __m128i _mm_cvtepu16_epi64(__m128i __X) { return (__m128i)__builtin_ia32_pmovzxwq128((__v8hi)__X); } __funline __m128i _mm_cvtepu8_epi16(__m128i __X) { return (__m128i)__builtin_ia32_pmovzxbw128((__v16qi)__X); } __funline __m128i _mm_packus_epi32(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_packusdw128((__v4si)__X, (__v4si)__Y); } #ifdef __OPTIMIZE__ __funline __m128i _mm_mpsadbw_epu8(__m128i __X, __m128i __Y, const int __M) { return 
(__m128i)__builtin_ia32_mpsadbw128((__v16qi)__X, (__v16qi)__Y, __M); } #else #define _mm_mpsadbw_epu8(X, Y, M) \ ((__m128i)__builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \ (__v16qi)(__m128i)(Y), (int)(M))) #endif __funline __m128i _mm_stream_load_si128(__m128i *__X) { return (__m128i)__builtin_ia32_movntdqa((__v2di *)__X); } #ifndef __SSE4_2__ #pragma GCC push_options #pragma GCC target("sse4.2") #define __DISABLE_SSE4_2__ #endif #define _SIDD_UBYTE_OPS 0x00 #define _SIDD_UWORD_OPS 0x01 #define _SIDD_SBYTE_OPS 0x02 #define _SIDD_SWORD_OPS 0x03 #define _SIDD_CMP_EQUAL_ANY 0x00 #define _SIDD_CMP_RANGES 0x04 #define _SIDD_CMP_EQUAL_EACH 0x08 #define _SIDD_CMP_EQUAL_ORDERED 0x0c #define _SIDD_POSITIVE_POLARITY 0x00 #define _SIDD_NEGATIVE_POLARITY 0x10 #define _SIDD_MASKED_POSITIVE_POLARITY 0x20 #define _SIDD_MASKED_NEGATIVE_POLARITY 0x30 #define _SIDD_LEAST_SIGNIFICANT 0x00 #define _SIDD_MOST_SIGNIFICANT 0x40 #define _SIDD_BIT_MASK 0x00 #define _SIDD_UNIT_MASK 0x40 #ifdef __OPTIMIZE__ __funline __m128i _mm_cmpistrm(__m128i __X, __m128i __Y, const int __M) { return (__m128i)__builtin_ia32_pcmpistrm128((__v16qi)__X, (__v16qi)__Y, __M); } __funline int _mm_cmpistri(__m128i __X, __m128i __Y, const int __M) { return __builtin_ia32_pcmpistri128((__v16qi)__X, (__v16qi)__Y, __M); } __funline __m128i _mm_cmpestrm(__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) { return (__m128i)__builtin_ia32_pcmpestrm128((__v16qi)__X, __LX, (__v16qi)__Y, __LY, __M); } __funline int _mm_cmpestri(__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) { return __builtin_ia32_pcmpestri128((__v16qi)__X, __LX, (__v16qi)__Y, __LY, __M); } #else #define _mm_cmpistrm(X, Y, M) \ ((__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(X), \ (__v16qi)(__m128i)(Y), (int)(M))) #define _mm_cmpistri(X, Y, M) \ ((int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(X), \ (__v16qi)(__m128i)(Y), (int)(M))) #define _mm_cmpestrm(X, LX, Y, LY, M) \ ((__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(X), (int)(LX), \ (__v16qi)(__m128i)(Y), (int)(LY), \ (int)(M))) #define _mm_cmpestri(X, LX, Y, LY, M) \ ((int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(X), (int)(LX), \ (__v16qi)(__m128i)(Y), (int)(LY), \ (int)(M))) #endif #ifdef __OPTIMIZE__ __funline int _mm_cmpistra(__m128i __X, __m128i __Y, const int __M) { return __builtin_ia32_pcmpistria128((__v16qi)__X, (__v16qi)__Y, __M); } __funline int _mm_cmpistrc(__m128i __X, __m128i __Y, const int __M) { return __builtin_ia32_pcmpistric128((__v16qi)__X, (__v16qi)__Y, __M); } __funline int _mm_cmpistro(__m128i __X, __m128i __Y, const int __M) { return __builtin_ia32_pcmpistrio128((__v16qi)__X, (__v16qi)__Y, __M); } __funline int _mm_cmpistrs(__m128i __X, __m128i __Y, const int __M) { return __builtin_ia32_pcmpistris128((__v16qi)__X, (__v16qi)__Y, __M); } __funline int _mm_cmpistrz(__m128i __X, __m128i __Y, const int __M) { return __builtin_ia32_pcmpistriz128((__v16qi)__X, (__v16qi)__Y, __M); } __funline int _mm_cmpestra(__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) { return __builtin_ia32_pcmpestria128((__v16qi)__X, __LX, (__v16qi)__Y, __LY, __M); } __funline int _mm_cmpestrc(__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) { return __builtin_ia32_pcmpestric128((__v16qi)__X, __LX, (__v16qi)__Y, __LY, __M); } __funline int _mm_cmpestro(__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) { return __builtin_ia32_pcmpestrio128((__v16qi)__X, __LX, (__v16qi)__Y, __LY, __M); } __funline int _mm_cmpestrs(__m128i __X, int __LX, __m128i __Y, 
int __LY, const int __M) { return __builtin_ia32_pcmpestris128((__v16qi)__X, __LX, (__v16qi)__Y, __LY, __M); } __funline int _mm_cmpestrz(__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) { return __builtin_ia32_pcmpestriz128((__v16qi)__X, __LX, (__v16qi)__Y, __LY, __M); } #else #define _mm_cmpistra(X, Y, M) \ ((int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(X), \ (__v16qi)(__m128i)(Y), (int)(M))) #define _mm_cmpistrc(X, Y, M) \ ((int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(X), \ (__v16qi)(__m128i)(Y), (int)(M))) #define _mm_cmpistro(X, Y, M) \ ((int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(X), \ (__v16qi)(__m128i)(Y), (int)(M))) #define _mm_cmpistrs(X, Y, M) \ ((int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(X), \ (__v16qi)(__m128i)(Y), (int)(M))) #define _mm_cmpistrz(X, Y, M) \ ((int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(X), \ (__v16qi)(__m128i)(Y), (int)(M))) #define _mm_cmpestra(X, LX, Y, LY, M) \ ((int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(X), (int)(LX), \ (__v16qi)(__m128i)(Y), (int)(LY), \ (int)(M))) #define _mm_cmpestrc(X, LX, Y, LY, M) \ ((int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(X), (int)(LX), \ (__v16qi)(__m128i)(Y), (int)(LY), \ (int)(M))) #define _mm_cmpestro(X, LX, Y, LY, M) \ ((int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(X), (int)(LX), \ (__v16qi)(__m128i)(Y), (int)(LY), \ (int)(M))) #define _mm_cmpestrs(X, LX, Y, LY, M) \ ((int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(X), (int)(LX), \ (__v16qi)(__m128i)(Y), (int)(LY), \ (int)(M))) #define _mm_cmpestrz(X, LX, Y, LY, M) \ ((int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(X), (int)(LX), \ (__v16qi)(__m128i)(Y), (int)(LY), \ (int)(M))) #endif __funline __m128i _mm_cmpgt_epi64(__m128i __X, __m128i __Y) { return (__m128i)((__v2di)__X > (__v2di)__Y); } #ifdef __DISABLE_SSE4_2__ #undef __DISABLE_SSE4_2__ #pragma GCC pop_options #endif /* __DISABLE_SSE4_2__ */ #ifdef __DISABLE_SSE4_1__ #undef __DISABLE_SSE4_1__ #pragma GCC pop_options #endif /* __DISABLE_SSE4_1__ */ #include "third_party/intel/popcntintrin.internal.h" #ifndef __SSE4_1__ #pragma GCC push_options #pragma GCC target("sse4.1") #define __DISABLE_SSE4_1__ #endif /* __SSE4_1__ */ #ifndef __SSE4_2__ #pragma GCC push_options #pragma GCC target("sse4.2") #define __DISABLE_SSE4_2__ #endif /* __SSE4_2__ */ /* Accumulate CRC32 (polynomial 0x11EDC6F41) value. */ __funline unsigned int _mm_crc32_u8(unsigned int __C, unsigned char __V) { return __builtin_ia32_crc32qi(__C, __V); } __funline unsigned int _mm_crc32_u16(unsigned int __C, unsigned short __V) { return __builtin_ia32_crc32hi(__C, __V); } __funline unsigned int _mm_crc32_u32(unsigned int __C, unsigned int __V) { return __builtin_ia32_crc32si(__C, __V); } #ifdef __x86_64__ __funline unsigned long long _mm_crc32_u64(unsigned long long __C, unsigned long long __V) { return __builtin_ia32_crc32di(__C, __V); } #endif #ifdef __DISABLE_SSE4_2__ #undef __DISABLE_SSE4_2__ #pragma GCC pop_options #endif /* __DISABLE_SSE4_2__ */ #ifdef __DISABLE_SSE4_1__ #undef __DISABLE_SSE4_1__ #pragma GCC pop_options #endif /* __DISABLE_SSE4_1__ */ #endif /* __x86_64__ */ #endif /* _SMMINTRIN_H_INCLUDED */
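/* Usage sketch (hypothetical, not part of the header): accumulating a CRC32-C checksum one byte at a time with the SSE4.2 intrinsic above, plus the packed rounding helper. The initial value and final xor follow the usual CRC32-C convention. A minimal sketch, assuming an SSE4.2 CPU and compilation with -msse4.2. */ #include <smmintrin.h> #include <stddef.h> static unsigned crc32c(const unsigned char *p, size_t n) { unsigned c = 0xFFFFFFFFu; /* conventional initial value */ while (n--) c = _mm_crc32_u8(c, *p++); /* fold in one byte per step */ return c ^ 0xFFFFFFFFu; /* conventional final xor */ } static __m128 floor4(__m128 v) { return _mm_round_ps(v, _MM_FROUND_FLOOR); /* what the _mm_floor_ps macro expands to */ }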
21,055
578
jart/cosmopolitan
false
cosmopolitan/third_party/intel/tbmintrin.internal.h
#ifndef _X86INTRIN_H_INCLUDED #error "Never use <tbmintrin.h> directly; include <x86intrin.h> instead." #endif #ifndef _TBMINTRIN_H_INCLUDED #define _TBMINTRIN_H_INCLUDED #ifndef __TBM__ #pragma GCC push_options #pragma GCC target("tbm") #define __DISABLE_TBM__ #endif /* __TBM__ */ #ifdef __OPTIMIZE__ __funline unsigned int __bextri_u32(unsigned int __X, const unsigned int __I) { return __builtin_ia32_bextri_u32(__X, __I); } #else #define __bextri_u32(X, I) \ ((unsigned int)__builtin_ia32_bextri_u32((unsigned int)(X), \ (unsigned int)(I))) #endif /*__OPTIMIZE__ */ __funline unsigned int __blcfill_u32(unsigned int __X) { return __X & (__X + 1); } __funline unsigned int __blci_u32(unsigned int __X) { return __X | ~(__X + 1); } __funline unsigned int __blcic_u32(unsigned int __X) { return ~__X & (__X + 1); } __funline unsigned int __blcmsk_u32(unsigned int __X) { return __X ^ (__X + 1); } __funline unsigned int __blcs_u32(unsigned int __X) { return __X | (__X + 1); } __funline unsigned int __blsfill_u32(unsigned int __X) { return __X | (__X - 1); } __funline unsigned int __blsic_u32(unsigned int __X) { return ~__X | (__X - 1); } __funline unsigned int __t1mskc_u32(unsigned int __X) { return ~__X | (__X + 1); } __funline unsigned int __tzmsk_u32(unsigned int __X) { return ~__X & (__X - 1); } #ifdef __x86_64__ #ifdef __OPTIMIZE__ __funline unsigned long long __bextri_u64(unsigned long long __X, const unsigned int __I) { return __builtin_ia32_bextri_u64(__X, __I); } #else #define __bextri_u64(X, I) \ ((unsigned long long)__builtin_ia32_bextri_u64((unsigned long long)(X), \ (unsigned long long)(I))) #endif /*__OPTIMIZE__ */ __funline unsigned long long __blcfill_u64(unsigned long long __X) { return __X & (__X + 1); } __funline unsigned long long __blci_u64(unsigned long long __X) { return __X | ~(__X + 1); } __funline unsigned long long __blcic_u64(unsigned long long __X) { return ~__X & (__X + 1); } __funline unsigned long long __blcmsk_u64(unsigned long long __X) { return __X ^ (__X + 1); } __funline unsigned long long __blcs_u64(unsigned long long __X) { return __X | (__X + 1); } __funline unsigned long long __blsfill_u64(unsigned long long __X) { return __X | (__X - 1); } __funline unsigned long long __blsic_u64(unsigned long long __X) { return ~__X | (__X - 1); } __funline unsigned long long __t1mskc_u64(unsigned long long __X) { return ~__X | (__X + 1); } __funline unsigned long long __tzmsk_u64(unsigned long long __X) { return ~__X & (__X - 1); } #endif /* __x86_64__ */ #ifdef __DISABLE_TBM__ #undef __DISABLE_TBM__ #pragma GCC pop_options #endif /* __DISABLE_TBM__ */ #endif /* _TBMINTRIN_H_INCLUDED */
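/* Usage sketch (hypothetical, not part of the header): what the TBM bit-manipulation helpers above compute on a concrete value, X = 0x58 (binary 01011000). A minimal sketch, assuming compilation with -mtbm. */ #include <x86intrin.h> static void tbm_demo(void) { unsigned x = 0x58u; /* 0b01011000 */ unsigned fill = __blsfill_u32(x); /* x | (x-1) = 0x5F: set through lowest set bit */ unsigned tz = __tzmsk_u32(x); /* ~x & (x-1) = 0x07: mask of the trailing zeros */ unsigned lcm = __blcmsk_u32(x); /* x ^ (x+1) = 0x01: mask through lowest clear bit */ (void)fill; (void)tz; (void)lcm; }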
2,933
116
jart/cosmopolitan
false
cosmopolitan/third_party/intel/avx512fintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #error "Never use <avx512fintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _AVX512FINTRIN_H_INCLUDED #define _AVX512FINTRIN_H_INCLUDED #ifndef __AVX512F__ #pragma GCC push_options #pragma GCC target("avx512f") #define __DISABLE_AVX512F__ #endif /* __AVX512F__ */ typedef double __v8df __attribute__((__vector_size__(64))); typedef float __v16sf __attribute__((__vector_size__(64))); typedef long long __v8di __attribute__((__vector_size__(64))); typedef unsigned long long __v8du __attribute__((__vector_size__(64))); typedef int __v16si __attribute__((__vector_size__(64))); typedef unsigned int __v16su __attribute__((__vector_size__(64))); typedef short __v32hi __attribute__((__vector_size__(64))); typedef unsigned short __v32hu __attribute__((__vector_size__(64))); typedef char __v64qi __attribute__((__vector_size__(64))); typedef unsigned char __v64qu __attribute__((__vector_size__(64))); typedef float __m512 __attribute__((__vector_size__(64), __may_alias__)); typedef long long __m512i __attribute__((__vector_size__(64), __may_alias__)); typedef double __m512d __attribute__((__vector_size__(64), __may_alias__)); typedef float __m512_u __attribute__((__vector_size__(64), __may_alias__, __aligned__(1))); typedef long long __m512i_u __attribute__((__vector_size__(64), __may_alias__, __aligned__(1))); typedef double __m512d_u __attribute__((__vector_size__(64), __may_alias__, __aligned__(1))); typedef unsigned char __mmask8; typedef unsigned short __mmask16; __funline __mmask16 _mm512_int2mask(int __M) { return (__mmask16)__M; } __funline int _mm512_mask2int(__mmask16 __M) { return (int)__M; } __funline __m512i _mm512_set_epi64(long long __A, long long __B, long long __C, long long __D, long long __E, long long __F, long long __G, long long __H) { return __extension__(__m512i)(__v8di){__H, __G, __F, __E, __D, __C, __B, __A}; } __funline __m512i _mm512_set_epi32(int __A, int __B, int __C, int __D, int __E, int __F, int __G, int __H, int __I, int __J, int __K, int __L, int __M, int __N, int __O, int __P) { return __extension__(__m512i)(__v16si){__P, __O, __N, __M, __L, __K, __J, __I, __H, __G, __F, __E, __D, __C, __B, __A}; } __funline __m512i _mm512_set_epi16( short __q31, short __q30, short __q29, short __q28, short __q27, short __q26, short __q25, short __q24, short __q23, short __q22, short __q21, short __q20, short __q19, short __q18, short __q17, short __q16, short __q15, short __q14, short __q13, short __q12, short __q11, short __q10, short __q09, short __q08, short __q07, short __q06, short __q05, short __q04, short __q03, short __q02, short __q01, short __q00) { return __extension__(__m512i)(__v32hi){ __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15, __q16, __q17, __q18, __q19, __q20, __q21, __q22, __q23, __q24, __q25, __q26, __q27, __q28, __q29, __q30, __q31}; } __funline __m512i _mm512_set_epi8( char __q63, char __q62, char __q61, char __q60, char __q59, char __q58, char __q57, char __q56, char __q55, char __q54, char __q53, char __q52, char __q51, char __q50, char __q49, char __q48, char __q47, char __q46, char __q45, char __q44, char __q43, char __q42, char __q41, char __q40, char __q39, char __q38, char __q37, char __q36, char __q35, char __q34, char __q33, char __q32, char __q31, char __q30, char __q29, char __q28, char __q27, char __q26, char __q25, char __q24, char __q23, char __q22, char __q21, char __q20, char __q19, char __q18, char __q17, char __q16, char __q15, char 
__q14, char __q13, char __q12, char __q11, char __q10, char __q09, char __q08, char __q07, char __q06, char __q05, char __q04, char __q03, char __q02, char __q01, char __q00) { return __extension__(__m512i)(__v64qi){ __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15, __q16, __q17, __q18, __q19, __q20, __q21, __q22, __q23, __q24, __q25, __q26, __q27, __q28, __q29, __q30, __q31, __q32, __q33, __q34, __q35, __q36, __q37, __q38, __q39, __q40, __q41, __q42, __q43, __q44, __q45, __q46, __q47, __q48, __q49, __q50, __q51, __q52, __q53, __q54, __q55, __q56, __q57, __q58, __q59, __q60, __q61, __q62, __q63}; } __funline __m512d _mm512_set_pd(double __A, double __B, double __C, double __D, double __E, double __F, double __G, double __H) { return __extension__(__m512d){__H, __G, __F, __E, __D, __C, __B, __A}; } __funline __m512 _mm512_set_ps(float __A, float __B, float __C, float __D, float __E, float __F, float __G, float __H, float __I, float __J, float __K, float __L, float __M, float __N, float __O, float __P) { return __extension__(__m512){__P, __O, __N, __M, __L, __K, __J, __I, __H, __G, __F, __E, __D, __C, __B, __A}; } #define _mm512_setr_epi64(e0, e1, e2, e3, e4, e5, e6, e7) \ _mm512_set_epi64(e7, e6, e5, e4, e3, e2, e1, e0) #define _mm512_setr_epi32(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, \ e12, e13, e14, e15) \ _mm512_set_epi32(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, \ e2, e1, e0) #define _mm512_setr_pd(e0, e1, e2, e3, e4, e5, e6, e7) \ _mm512_set_pd(e7, e6, e5, e4, e3, e2, e1, e0) #define _mm512_setr_ps(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, \ e13, e14, e15) \ _mm512_set_ps(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, \ e1, e0) __funline __m512 _mm512_undefined_ps(void) { __m512 __Y = __Y; return __Y; } #define _mm512_undefined _mm512_undefined_ps __funline __m512d _mm512_undefined_pd(void) { __m512d __Y = __Y; return __Y; } __funline __m512i _mm512_undefined_epi32(void) { __m512i __Y = __Y; return __Y; } #define _mm512_undefined_si512 _mm512_undefined_epi32 __funline __m512i _mm512_set1_epi8(char __A) { return __extension__(__m512i)(__v64qi){ __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A}; } __funline __m512i _mm512_set1_epi16(short __A) { return __extension__(__m512i)(__v32hi){ __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A}; } __funline __m512d _mm512_set1_pd(double __A) { return (__m512d)__builtin_ia32_broadcastsd512( __extension__(__v2df){ __A, }, (__v8df)_mm512_undefined_pd(), (__mmask8)-1); } __funline __m512 _mm512_set1_ps(float __A) { return (__m512)__builtin_ia32_broadcastss512( __extension__(__v4sf){ __A, }, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1); } __funline __m512i _mm512_set4_epi32(int __A, int __B, int __C, int __D) { return __extension__(__m512i)(__v16si){__D, __C, __B, __A, __D, __C, __B, __A, __D, __C, __B, __A, __D, __C, __B, __A}; } __funline __m512i _mm512_set4_epi64(long long __A, long long __B, long long __C, long long __D) { return __extension__(__m512i)(__v8di){__D, __C, __B, __A, __D, __C, __B, __A}; } __funline __m512d _mm512_set4_pd(double __A, double __B, 
double __C, double __D) { return __extension__(__m512d){__D, __C, __B, __A, __D, __C, __B, __A}; } __funline __m512 _mm512_set4_ps(float __A, float __B, float __C, float __D) { return __extension__(__m512){__D, __C, __B, __A, __D, __C, __B, __A, __D, __C, __B, __A, __D, __C, __B, __A}; } #define _mm512_setr4_epi64(e0, e1, e2, e3) _mm512_set4_epi64(e3, e2, e1, e0) #define _mm512_setr4_epi32(e0, e1, e2, e3) _mm512_set4_epi32(e3, e2, e1, e0) #define _mm512_setr4_pd(e0, e1, e2, e3) _mm512_set4_pd(e3, e2, e1, e0) #define _mm512_setr4_ps(e0, e1, e2, e3) _mm512_set4_ps(e3, e2, e1, e0) __funline __m512 _mm512_setzero_ps(void) { return __extension__(__m512){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; } __funline __m512 _mm512_setzero(void) { return _mm512_setzero_ps(); } __funline __m512d _mm512_setzero_pd(void) { return __extension__(__m512d){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; } __funline __m512i _mm512_setzero_epi32(void) { return __extension__(__m512i)(__v8di){0, 0, 0, 0, 0, 0, 0, 0}; } __funline __m512i _mm512_setzero_si512(void) { return __extension__(__m512i)(__v8di){0, 0, 0, 0, 0, 0, 0, 0}; } __funline __m512d _mm512_mask_mov_pd(__m512d __W, __mmask8 __U, __m512d __A) { return (__m512d)__builtin_ia32_movapd512_mask((__v8df)__A, (__v8df)__W, (__mmask8)__U); } __funline __m512d _mm512_maskz_mov_pd(__mmask8 __U, __m512d __A) { return (__m512d)__builtin_ia32_movapd512_mask( (__v8df)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U); } __funline __m512 _mm512_mask_mov_ps(__m512 __W, __mmask16 __U, __m512 __A) { return (__m512)__builtin_ia32_movaps512_mask((__v16sf)__A, (__v16sf)__W, (__mmask16)__U); } __funline __m512 _mm512_maskz_mov_ps(__mmask16 __U, __m512 __A) { return (__m512)__builtin_ia32_movaps512_mask( (__v16sf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U); } __funline __m512d _mm512_load_pd(void const *__P) { return *(__m512d *)__P; } __funline __m512d _mm512_mask_load_pd(__m512d __W, __mmask8 __U, void const *__P) { return (__m512d)__builtin_ia32_loadapd512_mask((const __v8df *)__P, (__v8df)__W, (__mmask8)__U); } __funline __m512d _mm512_maskz_load_pd(__mmask8 __U, void const *__P) { return (__m512d)__builtin_ia32_loadapd512_mask( (const __v8df *)__P, (__v8df)_mm512_setzero_pd(), (__mmask8)__U); } __funline void _mm512_store_pd(void *__P, __m512d __A) { *(__m512d *)__P = __A; } __funline void _mm512_mask_store_pd(void *__P, __mmask8 __U, __m512d __A) { __builtin_ia32_storeapd512_mask((__v8df *)__P, (__v8df)__A, (__mmask8)__U); } __funline __m512 _mm512_load_ps(void const *__P) { return *(__m512 *)__P; } __funline __m512 _mm512_mask_load_ps(__m512 __W, __mmask16 __U, void const *__P) { return (__m512)__builtin_ia32_loadaps512_mask((const __v16sf *)__P, (__v16sf)__W, (__mmask16)__U); } __funline __m512 _mm512_maskz_load_ps(__mmask16 __U, void const *__P) { return (__m512)__builtin_ia32_loadaps512_mask( (const __v16sf *)__P, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U); } __funline void _mm512_store_ps(void *__P, __m512 __A) { *(__m512 *)__P = __A; } __funline void _mm512_mask_store_ps(void *__P, __mmask16 __U, __m512 __A) { __builtin_ia32_storeaps512_mask((__v16sf *)__P, (__v16sf)__A, (__mmask16)__U); } __funline __m512i _mm512_mask_mov_epi64(__m512i __W, __mmask8 __U, __m512i __A) { return (__m512i)__builtin_ia32_movdqa64_512_mask((__v8di)__A, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_mov_epi64(__mmask8 __U, __m512i __A) { return (__m512i)__builtin_ia32_movdqa64_512_mask( (__v8di)__A, (__v8di)_mm512_setzero_si512(), 
(__mmask8)__U); } __funline __m512i _mm512_load_epi64(void const *__P) { return *(__m512i *)__P; } __funline __m512i _mm512_mask_load_epi64(__m512i __W, __mmask8 __U, void const *__P) { return (__m512i)__builtin_ia32_movdqa64load512_mask( (const __v8di *)__P, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_load_epi64(__mmask8 __U, void const *__P) { return (__m512i)__builtin_ia32_movdqa64load512_mask( (const __v8di *)__P, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } __funline void _mm512_store_epi64(void *__P, __m512i __A) { *(__m512i *)__P = __A; } __funline void _mm512_mask_store_epi64(void *__P, __mmask8 __U, __m512i __A) { __builtin_ia32_movdqa64store512_mask((__v8di *)__P, (__v8di)__A, (__mmask8)__U); } __funline __m512i _mm512_mask_mov_epi32(__m512i __W, __mmask16 __U, __m512i __A) { return (__m512i)__builtin_ia32_movdqa32_512_mask((__v16si)__A, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_mov_epi32(__mmask16 __U, __m512i __A) { return (__m512i)__builtin_ia32_movdqa32_512_mask( (__v16si)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline __m512i _mm512_load_si512(void const *__P) { return *(__m512i *)__P; } __funline __m512i _mm512_load_epi32(void const *__P) { return *(__m512i *)__P; } __funline __m512i _mm512_mask_load_epi32(__m512i __W, __mmask16 __U, void const *__P) { return (__m512i)__builtin_ia32_movdqa32load512_mask( (const __v16si *)__P, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_load_epi32(__mmask16 __U, void const *__P) { return (__m512i)__builtin_ia32_movdqa32load512_mask( (const __v16si *)__P, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline void _mm512_store_si512(void *__P, __m512i __A) { *(__m512i *)__P = __A; } __funline void _mm512_store_epi32(void *__P, __m512i __A) { *(__m512i *)__P = __A; } __funline void _mm512_mask_store_epi32(void *__P, __mmask16 __U, __m512i __A) { __builtin_ia32_movdqa32store512_mask((__v16si *)__P, (__v16si)__A, (__mmask16)__U); } __funline __m512i _mm512_mullo_epi32(__m512i __A, __m512i __B) { return (__m512i)((__v16su)__A * (__v16su)__B); } __funline __m512i _mm512_maskz_mullo_epi32(__mmask16 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmulld512_mask( (__v16si)__A, (__v16si)__B, (__v16si)_mm512_setzero_si512(), __M); } __funline __m512i _mm512_mask_mullo_epi32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmulld512_mask((__v16si)__A, (__v16si)__B, (__v16si)__W, __M); } __funline __m512i _mm512_mullox_epi64(__m512i __A, __m512i __B) { return (__m512i)((__v8du)__A * (__v8du)__B); } __funline __m512i _mm512_mask_mullox_epi64(__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) { return _mm512_mask_mov_epi64(__W, __M, _mm512_mullox_epi64(__A, __B)); } __funline __m512i _mm512_sllv_epi32(__m512i __X, __m512i __Y) { return (__m512i)__builtin_ia32_psllv16si_mask( (__v16si)__X, (__v16si)__Y, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_sllv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y) { return (__m512i)__builtin_ia32_psllv16si_mask((__v16si)__X, (__v16si)__Y, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_sllv_epi32(__mmask16 __U, __m512i __X, __m512i __Y) { return (__m512i)__builtin_ia32_psllv16si_mask((__v16si)__X, (__v16si)__Y, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline __m512i _mm512_srav_epi32(__m512i __X, __m512i __Y) { return (__m512i)__builtin_ia32_psrav16si_mask( (__v16si)__X, (__v16si)__Y, 
(__v16si)_mm512_undefined_epi32(), (__mmask16)-1);
}

__funline __m512i _mm512_mask_srav_epi32(__m512i __W, __mmask16 __U,
                                         __m512i __X, __m512i __Y) {
  return (__m512i)__builtin_ia32_psrav16si_mask((__v16si)__X, (__v16si)__Y,
                                                (__v16si)__W, (__mmask16)__U);
}

__funline __m512i _mm512_maskz_srav_epi32(__mmask16 __U, __m512i __X,
                                          __m512i __Y) {
  return (__m512i)__builtin_ia32_psrav16si_mask(
      (__v16si)__X, (__v16si)__Y, (__v16si)_mm512_setzero_si512(),
      (__mmask16)__U);
}

__funline __m512i _mm512_srlv_epi32(__m512i __X, __m512i __Y) {
  return (__m512i)__builtin_ia32_psrlv16si_mask(
      (__v16si)__X, (__v16si)__Y, (__v16si)_mm512_undefined_epi32(),
      (__mmask16)-1);
}

__funline __m512i _mm512_mask_srlv_epi32(__m512i __W, __mmask16 __U,
                                         __m512i __X, __m512i __Y) {
  return (__m512i)__builtin_ia32_psrlv16si_mask((__v16si)__X, (__v16si)__Y,
                                                (__v16si)__W, (__mmask16)__U);
}

__funline __m512i _mm512_maskz_srlv_epi32(__mmask16 __U, __m512i __X,
                                          __m512i __Y) {
  return (__m512i)__builtin_ia32_psrlv16si_mask(
      (__v16si)__X, (__v16si)__Y, (__v16si)_mm512_setzero_si512(),
      (__mmask16)__U);
}

__funline __m512i _mm512_add_epi64(__m512i __A, __m512i __B) {
  return (__m512i)((__v8du)__A + (__v8du)__B);
}

__funline __m512i _mm512_mask_add_epi64(__m512i __W, __mmask8 __U, __m512i __A,
                                        __m512i __B) {
  return (__m512i)__builtin_ia32_paddq512_mask((__v8di)__A, (__v8di)__B,
                                               (__v8di)__W, (__mmask8)__U);
}

__funline __m512i _mm512_maskz_add_epi64(__mmask8 __U, __m512i __A,
                                         __m512i __B) {
  return (__m512i)__builtin_ia32_paddq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}

__funline __m512i _mm512_sub_epi64(__m512i __A, __m512i __B) {
  return (__m512i)((__v8du)__A - (__v8du)__B);
}

__funline __m512i _mm512_mask_sub_epi64(__m512i __W, __mmask8 __U, __m512i __A,
                                        __m512i __B) {
  return (__m512i)__builtin_ia32_psubq512_mask((__v8di)__A, (__v8di)__B,
                                               (__v8di)__W, (__mmask8)__U);
}

__funline __m512i _mm512_maskz_sub_epi64(__mmask8 __U, __m512i __A,
                                         __m512i __B) {
  return (__m512i)__builtin_ia32_psubq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}

__funline __m512i _mm512_sllv_epi64(__m512i __X, __m512i __Y) {
  return (__m512i)__builtin_ia32_psllv8di_mask(
      (__v8di)__X, (__v8di)__Y, (__v8di)_mm512_undefined_epi32(),
      (__mmask8)-1);
}

__funline __m512i _mm512_mask_sllv_epi64(__m512i __W, __mmask8 __U, __m512i __X,
                                         __m512i __Y) {
  return (__m512i)__builtin_ia32_psllv8di_mask((__v8di)__X, (__v8di)__Y,
                                               (__v8di)__W, (__mmask8)__U);
}

__funline __m512i _mm512_maskz_sllv_epi64(__mmask8 __U, __m512i __X,
                                          __m512i __Y) {
  return (__m512i)__builtin_ia32_psllv8di_mask(
      (__v8di)__X, (__v8di)__Y, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}

__funline __m512i _mm512_srav_epi64(__m512i __X, __m512i __Y) {
  return (__m512i)__builtin_ia32_psrav8di_mask(
      (__v8di)__X, (__v8di)__Y, (__v8di)_mm512_undefined_epi32(),
      (__mmask8)-1);
}

__funline __m512i _mm512_mask_srav_epi64(__m512i __W, __mmask8 __U, __m512i __X,
                                         __m512i __Y) {
  return (__m512i)__builtin_ia32_psrav8di_mask((__v8di)__X, (__v8di)__Y,
                                               (__v8di)__W, (__mmask8)__U);
}

__funline __m512i _mm512_maskz_srav_epi64(__mmask8 __U, __m512i __X,
                                          __m512i __Y) {
  return (__m512i)__builtin_ia32_psrav8di_mask(
      (__v8di)__X, (__v8di)__Y, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}

__funline __m512i _mm512_srlv_epi64(__m512i __X, __m512i __Y) {
  return (__m512i)__builtin_ia32_psrlv8di_mask(
      (__v8di)__X, (__v8di)__Y, (__v8di)_mm512_undefined_epi32(),
      (__mmask8)-1);
}

__funline __m512i _mm512_mask_srlv_epi64(__m512i __W, __mmask8 __U, __m512i __X,
                                         __m512i
__Y) { return (__m512i)__builtin_ia32_psrlv8di_mask((__v8di)__X, (__v8di)__Y, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y) { return (__m512i)__builtin_ia32_psrlv8di_mask( (__v8di)__X, (__v8di)__Y, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } __funline __m512i _mm512_add_epi32(__m512i __A, __m512i __B) { return (__m512i)((__v16su)__A + (__v16su)__B); } __funline __m512i _mm512_mask_add_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddd512_mask((__v16si)__A, (__v16si)__B, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_add_epi32(__mmask16 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddd512_mask((__v16si)__A, (__v16si)__B, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline __m512i _mm512_mul_epi32(__m512i __X, __m512i __Y) { return (__m512i)__builtin_ia32_pmuldq512_mask( (__v16si)__X, (__v16si)__Y, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_mul_epi32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) { return (__m512i)__builtin_ia32_pmuldq512_mask((__v16si)__X, (__v16si)__Y, (__v8di)__W, __M); } __funline __m512i _mm512_maskz_mul_epi32(__mmask8 __M, __m512i __X, __m512i __Y) { return (__m512i)__builtin_ia32_pmuldq512_mask( (__v16si)__X, (__v16si)__Y, (__v8di)_mm512_setzero_si512(), __M); } __funline __m512i _mm512_sub_epi32(__m512i __A, __m512i __B) { return (__m512i)((__v16su)__A - (__v16su)__B); } __funline __m512i _mm512_mask_sub_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubd512_mask((__v16si)__A, (__v16si)__B, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubd512_mask((__v16si)__A, (__v16si)__B, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline __m512i _mm512_mul_epu32(__m512i __X, __m512i __Y) { return (__m512i)__builtin_ia32_pmuludq512_mask( (__v16si)__X, (__v16si)__Y, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_mul_epu32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) { return (__m512i)__builtin_ia32_pmuludq512_mask((__v16si)__X, (__v16si)__Y, (__v8di)__W, __M); } __funline __m512i _mm512_maskz_mul_epu32(__mmask8 __M, __m512i __X, __m512i __Y) { return (__m512i)__builtin_ia32_pmuludq512_mask( (__v16si)__X, (__v16si)__Y, (__v8di)_mm512_setzero_si512(), __M); } #ifdef __OPTIMIZE__ __funline __m512i _mm512_slli_epi64(__m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_psllqi512_mask( (__v8di)__A, __B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_psllqi512_mask((__v8di)__A, __B, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_psllqi512_mask( (__v8di)__A, __B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } #else #define _mm512_slli_epi64(X, C) \ ((__m512i)__builtin_ia32_psllqi512_mask( \ (__v8di)(__m512i)(X), (int)(C), \ (__v8di)(__m512i)_mm512_undefined_epi32(), (__mmask8)-1)) #define _mm512_mask_slli_epi64(W, U, X, C) \ ((__m512i)__builtin_ia32_psllqi512_mask( \ (__v8di)(__m512i)(X), (int)(C), (__v8di)(__m512i)(W), (__mmask8)(U))) #define _mm512_maskz_slli_epi64(U, X, C) \ ((__m512i)__builtin_ia32_psllqi512_mask( \ 
(__v8di)(__m512i)(X), (int)(C), (__v8di)(__m512i)_mm512_setzero_si512(), \ (__mmask8)(U))) #endif __funline __m512i _mm512_sll_epi64(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psllq512_mask( (__v8di)__A, (__v2di)__B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_sll_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psllq512_mask((__v8di)__A, (__v2di)__B, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_sll_epi64(__mmask8 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psllq512_mask( (__v8di)__A, (__v2di)__B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } #ifdef __OPTIMIZE__ __funline __m512i _mm512_srli_epi64(__m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_psrlqi512_mask( (__v8di)__A, __B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_psrlqi512_mask((__v8di)__A, __B, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_psrlqi512_mask( (__v8di)__A, __B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } #else #define _mm512_srli_epi64(X, C) \ ((__m512i)__builtin_ia32_psrlqi512_mask( \ (__v8di)(__m512i)(X), (int)(C), \ (__v8di)(__m512i)_mm512_undefined_epi32(), (__mmask8)-1)) #define _mm512_mask_srli_epi64(W, U, X, C) \ ((__m512i)__builtin_ia32_psrlqi512_mask( \ (__v8di)(__m512i)(X), (int)(C), (__v8di)(__m512i)(W), (__mmask8)(U))) #define _mm512_maskz_srli_epi64(U, X, C) \ ((__m512i)__builtin_ia32_psrlqi512_mask( \ (__v8di)(__m512i)(X), (int)(C), (__v8di)(__m512i)_mm512_setzero_si512(), \ (__mmask8)(U))) #endif __funline __m512i _mm512_srl_epi64(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psrlq512_mask( (__v8di)__A, (__v2di)__B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_srl_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psrlq512_mask((__v8di)__A, (__v2di)__B, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_srl_epi64(__mmask8 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psrlq512_mask( (__v8di)__A, (__v2di)__B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } #ifdef __OPTIMIZE__ __funline __m512i _mm512_srai_epi64(__m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_psraqi512_mask( (__v8di)__A, __B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_psraqi512_mask((__v8di)__A, __B, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_psraqi512_mask( (__v8di)__A, __B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } #else #define _mm512_srai_epi64(X, C) \ ((__m512i)__builtin_ia32_psraqi512_mask( \ (__v8di)(__m512i)(X), (int)(C), \ (__v8di)(__m512i)_mm512_undefined_epi32(), (__mmask8)-1)) #define _mm512_mask_srai_epi64(W, U, X, C) \ ((__m512i)__builtin_ia32_psraqi512_mask( \ (__v8di)(__m512i)(X), (int)(C), (__v8di)(__m512i)(W), (__mmask8)(U))) #define _mm512_maskz_srai_epi64(U, X, C) \ ((__m512i)__builtin_ia32_psraqi512_mask( \ (__v8di)(__m512i)(X), (int)(C), (__v8di)(__m512i)_mm512_setzero_si512(), \ (__mmask8)(U))) #endif __funline __m512i 
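/* Editorial note: 64-bit arithmetic right shifts (VPSRAQ, exposed below as
   _mm512_srai_epi64 / _mm512_sra_epi64) are new with AVX-512F; SSE2 and AVX2
   only provide the 16/32-bit forms, so earlier code had to emulate them. */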
_mm512_sra_epi64(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psraq512_mask( (__v8di)__A, (__v2di)__B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_sra_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psraq512_mask((__v8di)__A, (__v2di)__B, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_sra_epi64(__mmask8 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psraq512_mask( (__v8di)__A, (__v2di)__B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } #ifdef __OPTIMIZE__ __funline __m512i _mm512_slli_epi32(__m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_pslldi512_mask( (__v16si)__A, __B, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_pslldi512_mask((__v16si)__A, __B, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_pslldi512_mask( (__v16si)__A, __B, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } #else #define _mm512_slli_epi32(X, C) \ ((__m512i)__builtin_ia32_pslldi512_mask( \ (__v16si)(__m512i)(X), (int)(C), \ (__v16si)(__m512i)_mm512_undefined_epi32(), (__mmask16)-1)) #define _mm512_mask_slli_epi32(W, U, X, C) \ ((__m512i)__builtin_ia32_pslldi512_mask( \ (__v16si)(__m512i)(X), (int)(C), (__v16si)(__m512i)(W), (__mmask16)(U))) #define _mm512_maskz_slli_epi32(U, X, C) \ ((__m512i)__builtin_ia32_pslldi512_mask( \ (__v16si)(__m512i)(X), (int)(C), \ (__v16si)(__m512i)_mm512_setzero_si512(), (__mmask16)(U))) #endif __funline __m512i _mm512_sll_epi32(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_pslld512_mask( (__v16si)__A, (__v4si)__B, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_sll_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_pslld512_mask((__v16si)__A, (__v4si)__B, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_sll_epi32(__mmask16 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_pslld512_mask((__v16si)__A, (__v4si)__B, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } #ifdef __OPTIMIZE__ __funline __m512i _mm512_srli_epi32(__m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_psrldi512_mask( (__v16si)__A, __B, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_psrldi512_mask((__v16si)__A, __B, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_psrldi512_mask( (__v16si)__A, __B, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } #else #define _mm512_srli_epi32(X, C) \ ((__m512i)__builtin_ia32_psrldi512_mask( \ (__v16si)(__m512i)(X), (int)(C), \ (__v16si)(__m512i)_mm512_undefined_epi32(), (__mmask16)-1)) #define _mm512_mask_srli_epi32(W, U, X, C) \ ((__m512i)__builtin_ia32_psrldi512_mask( \ (__v16si)(__m512i)(X), (int)(C), (__v16si)(__m512i)(W), (__mmask16)(U))) #define _mm512_maskz_srli_epi32(U, X, C) \ ((__m512i)__builtin_ia32_psrldi512_mask( \ (__v16si)(__m512i)(X), (int)(C), \ (__v16si)(__m512i)_mm512_setzero_si512(), (__mmask16)(U))) #endif __funline __m512i _mm512_srl_epi32(__m512i __A, __m128i __B) { return 
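/* Editorial note: the shifts come in three flavors: _mm512_srli_epi32 takes
   an immediate count, _mm512_srl_epi32 takes one count for all lanes from
   the low 64 bits of an XMM register, and _mm512_srlv_epi32 shifts each lane
   by its own count, e.g. _mm512_srlv_epi32(v, _mm512_set1_epi32(3)) with a
   hypothetical vector v shifts every dword right by 3. */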
(__m512i)__builtin_ia32_psrld512_mask( (__v16si)__A, (__v4si)__B, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_srl_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psrld512_mask((__v16si)__A, (__v4si)__B, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_srl_epi32(__mmask16 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psrld512_mask((__v16si)__A, (__v4si)__B, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } #ifdef __OPTIMIZE__ __funline __m512i _mm512_srai_epi32(__m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_psradi512_mask( (__v16si)__A, __B, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_psradi512_mask((__v16si)__A, __B, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A, unsigned int __B) { return (__m512i)__builtin_ia32_psradi512_mask( (__v16si)__A, __B, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } #else #define _mm512_srai_epi32(X, C) \ ((__m512i)__builtin_ia32_psradi512_mask( \ (__v16si)(__m512i)(X), (int)(C), \ (__v16si)(__m512i)_mm512_undefined_epi32(), (__mmask16)-1)) #define _mm512_mask_srai_epi32(W, U, X, C) \ ((__m512i)__builtin_ia32_psradi512_mask( \ (__v16si)(__m512i)(X), (int)(C), (__v16si)(__m512i)(W), (__mmask16)(U))) #define _mm512_maskz_srai_epi32(U, X, C) \ ((__m512i)__builtin_ia32_psradi512_mask( \ (__v16si)(__m512i)(X), (int)(C), \ (__v16si)(__m512i)_mm512_setzero_si512(), (__mmask16)(U))) #endif __funline __m512i _mm512_sra_epi32(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psrad512_mask( (__v16si)__A, (__v4si)__B, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_sra_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psrad512_mask((__v16si)__A, (__v4si)__B, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_sra_epi32(__mmask16 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psrad512_mask((__v16si)__A, (__v4si)__B, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } #ifdef __OPTIMIZE__ __funline __m128d _mm_add_round_sd(__m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_addsd_round((__v2df)__A, (__v2df)__B, __R); } __funline __m128d _mm_mask_add_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_addsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U, __R); } __funline __m128d _mm_maskz_add_round_sd(__mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_addsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, __R); } __funline __m128 _mm_add_round_ss(__m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_addss_round((__v4sf)__A, (__v4sf)__B, __R); } __funline __m128 _mm_mask_add_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_addss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U, __R); } __funline __m128 _mm_maskz_add_round_ss(__mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_addss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, __R); } __funline __m128d _mm_sub_round_sd(__m128d __A, __m128d __B, const int __R) { 
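/* Editorial note: for the *_round_* intrinsics, the const __R argument must
   be a compile-time _MM_FROUND_* value from smmintrin.h, either
   _MM_FROUND_CUR_DIRECTION or a rounding mode OR'ed with _MM_FROUND_NO_EXC,
   e.g. _mm_add_round_sd(a, b, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)
   for hypothetical operands a and b. */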
return (__m128d)__builtin_ia32_subsd_round((__v2df)__A, (__v2df)__B, __R); } __funline __m128d _mm_mask_sub_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_subsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U, __R); } __funline __m128d _mm_maskz_sub_round_sd(__mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_subsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, __R); } __funline __m128 _mm_sub_round_ss(__m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_subss_round((__v4sf)__A, (__v4sf)__B, __R); } __funline __m128 _mm_mask_sub_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_subss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U, __R); } __funline __m128 _mm_maskz_sub_round_ss(__mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_subss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, __R); } #else #define _mm_add_round_sd(A, B, C) (__m128d) __builtin_ia32_addsd_round(A, B, C) #define _mm_mask_add_round_sd(W, U, A, B, C) \ (__m128d) __builtin_ia32_addsd_mask_round(A, B, W, U, C) #define _mm_maskz_add_round_sd(U, A, B, C) \ (__m128d) \ __builtin_ia32_addsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C) #define _mm_add_round_ss(A, B, C) (__m128) __builtin_ia32_addss_round(A, B, C) #define _mm_mask_add_round_ss(W, U, A, B, C) \ (__m128) __builtin_ia32_addss_mask_round(A, B, W, U, C) #define _mm_maskz_add_round_ss(U, A, B, C) \ (__m128) __builtin_ia32_addss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C) #define _mm_sub_round_sd(A, B, C) (__m128d) __builtin_ia32_subsd_round(A, B, C) #define _mm_mask_sub_round_sd(W, U, A, B, C) \ (__m128d) __builtin_ia32_subsd_mask_round(A, B, W, U, C) #define _mm_maskz_sub_round_sd(U, A, B, C) \ (__m128d) \ __builtin_ia32_subsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C) #define _mm_sub_round_ss(A, B, C) (__m128) __builtin_ia32_subss_round(A, B, C) #define _mm_mask_sub_round_ss(W, U, A, B, C) \ (__m128) __builtin_ia32_subss_mask_round(A, B, W, U, C) #define _mm_maskz_sub_round_ss(U, A, B, C) \ (__m128) __builtin_ia32_subss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C) #endif #ifdef __OPTIMIZE__ __funline __m512i _mm512_ternarylogic_epi64(__m512i __A, __m512i __B, __m512i __C, const int __imm) { return (__m512i)__builtin_ia32_pternlogq512_mask( (__v8di)__A, (__v8di)__B, (__v8di)__C, __imm, (__mmask8)-1); } __funline __m512i _mm512_mask_ternarylogic_epi64(__m512i __A, __mmask8 __U, __m512i __B, __m512i __C, const int __imm) { return (__m512i)__builtin_ia32_pternlogq512_mask( (__v8di)__A, (__v8di)__B, (__v8di)__C, __imm, (__mmask8)__U); } __funline __m512i _mm512_maskz_ternarylogic_epi64(__mmask8 __U, __m512i __A, __m512i __B, __m512i __C, const int __imm) { return (__m512i)__builtin_ia32_pternlogq512_maskz( (__v8di)__A, (__v8di)__B, (__v8di)__C, __imm, (__mmask8)__U); } __funline __m512i _mm512_ternarylogic_epi32(__m512i __A, __m512i __B, __m512i __C, const int __imm) { return (__m512i)__builtin_ia32_pternlogd512_mask( (__v16si)__A, (__v16si)__B, (__v16si)__C, __imm, (__mmask16)-1); } __funline __m512i _mm512_mask_ternarylogic_epi32(__m512i __A, __mmask16 __U, __m512i __B, __m512i __C, const int __imm) { return (__m512i)__builtin_ia32_pternlogd512_mask( (__v16si)__A, (__v16si)__B, (__v16si)__C, __imm, (__mmask16)__U); } __funline __m512i 
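/* Editorial note: the ternarylogic immediate is a 3-input truth table; for
   each bit, the result is imm8[(a << 2) | (b << 1) | c]. For example 0xCA is
   bitwise select (a ? b : c) and 0x96 is a three-way XOR, so
   _mm512_ternarylogic_epi64(a, b, c, 0x96) computes a ^ b ^ c in one op. */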
_mm512_maskz_ternarylogic_epi32(__mmask16 __U, __m512i __A, __m512i __B,
                                __m512i __C, const int __imm) {
  return (__m512i)__builtin_ia32_pternlogd512_maskz(
      (__v16si)__A, (__v16si)__B, (__v16si)__C, __imm, (__mmask16)__U);
}
#else
#define _mm512_ternarylogic_epi64(A, B, C, I)                           \
  ((__m512i)__builtin_ia32_pternlogq512_mask(                           \
      (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C), \
      (int)(I), (__mmask8)-1))
#define _mm512_mask_ternarylogic_epi64(A, U, B, C, I)                   \
  ((__m512i)__builtin_ia32_pternlogq512_mask(                           \
      (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C), \
      (int)(I), (__mmask8)(U)))
#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, I)                  \
  ((__m512i)__builtin_ia32_pternlogq512_maskz(                          \
      (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C), \
      (int)(I), (__mmask8)(U)))
#define _mm512_ternarylogic_epi32(A, B, C, I)                              \
  ((__m512i)__builtin_ia32_pternlogd512_mask(                              \
      (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C), \
      (int)(I), (__mmask16)-1))
#define _mm512_mask_ternarylogic_epi32(A, U, B, C, I)                      \
  ((__m512i)__builtin_ia32_pternlogd512_mask(                              \
      (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C), \
      (int)(I), (__mmask16)(U)))
#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, I)                     \
  ((__m512i)__builtin_ia32_pternlogd512_maskz(                             \
      (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C), \
      (int)(I), (__mmask16)(U)))
#endif

__funline __m512d _mm512_rcp14_pd(__m512d __A) {
  return (__m512d)__builtin_ia32_rcp14pd512_mask(
      (__v8df)__A, (__v8df)_mm512_undefined_pd(), (__mmask8)-1);
}

__funline __m512d _mm512_mask_rcp14_pd(__m512d __W, __mmask8 __U, __m512d __A) {
  return (__m512d)__builtin_ia32_rcp14pd512_mask((__v8df)__A, (__v8df)__W,
                                                 (__mmask8)__U);
}

__funline __m512d _mm512_maskz_rcp14_pd(__mmask8 __U, __m512d __A) {
  return (__m512d)__builtin_ia32_rcp14pd512_mask(
      (__v8df)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U);
}

__funline __m512 _mm512_rcp14_ps(__m512 __A) {
  return (__m512)__builtin_ia32_rcp14ps512_mask(
      (__v16sf)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1);
}

__funline __m512 _mm512_mask_rcp14_ps(__m512 __W, __mmask16 __U, __m512 __A) {
  return (__m512)__builtin_ia32_rcp14ps512_mask((__v16sf)__A, (__v16sf)__W,
                                                (__mmask16)__U);
}

__funline __m512 _mm512_maskz_rcp14_ps(__mmask16 __U, __m512 __A) {
  return (__m512)__builtin_ia32_rcp14ps512_mask(
      (__v16sf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U);
}

__funline __m128d _mm_rcp14_sd(__m128d __A, __m128d __B) {
  return (__m128d)__builtin_ia32_rcp14sd((__v2df)__B, (__v2df)__A);
}

__funline __m128d _mm_mask_rcp14_sd(__m128d __W, __mmask8 __U, __m128d __A,
                                    __m128d __B) {
  return (__m128d)__builtin_ia32_rcp14sd_mask((__v2df)__B, (__v2df)__A,
                                              (__v2df)__W, (__mmask8)__U);
}

__funline __m128d _mm_maskz_rcp14_sd(__mmask8 __U, __m128d __A, __m128d __B) {
  return (__m128d)__builtin_ia32_rcp14sd_mask(
      (__v2df)__B, (__v2df)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U);
}

__funline __m128 _mm_rcp14_ss(__m128 __A, __m128 __B) {
  return (__m128)__builtin_ia32_rcp14ss((__v4sf)__B, (__v4sf)__A);
}

__funline __m128 _mm_mask_rcp14_ss(__m128 __W, __mmask8 __U, __m128 __A,
                                   __m128 __B) {
  return (__m128)__builtin_ia32_rcp14ss_mask((__v4sf)__B, (__v4sf)__A,
                                             (__v4sf)__W, (__mmask8)__U);
}

__funline __m128 _mm_maskz_rcp14_ss(__mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128)__builtin_ia32_rcp14ss_mask(
      (__v4sf)__B, (__v4sf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
}

__funline __m512d _mm512_rsqrt14_pd(__m512d __A) {
  return (__m512d)__builtin_ia32_rsqrt14pd512_mask(
      (__v8df)__A,
(__v8df)_mm512_undefined_pd(), (__mmask8)-1); } __funline __m512d _mm512_mask_rsqrt14_pd(__m512d __W, __mmask8 __U, __m512d __A) { return (__m512d)__builtin_ia32_rsqrt14pd512_mask((__v8df)__A, (__v8df)__W, (__mmask8)__U); } __funline __m512d _mm512_maskz_rsqrt14_pd(__mmask8 __U, __m512d __A) { return (__m512d)__builtin_ia32_rsqrt14pd512_mask( (__v8df)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U); } __funline __m512 _mm512_rsqrt14_ps(__m512 __A) { return (__m512)__builtin_ia32_rsqrt14ps512_mask( (__v16sf)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1); } __funline __m512 _mm512_mask_rsqrt14_ps(__m512 __W, __mmask16 __U, __m512 __A) { return (__m512)__builtin_ia32_rsqrt14ps512_mask((__v16sf)__A, (__v16sf)__W, (__mmask16)__U); } __funline __m512 _mm512_maskz_rsqrt14_ps(__mmask16 __U, __m512 __A) { return (__m512)__builtin_ia32_rsqrt14ps512_mask( (__v16sf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U); } __funline __m128d _mm_rsqrt14_sd(__m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_rsqrt14sd((__v2df)__B, (__v2df)__A); } __funline __m128d _mm_mask_rsqrt14_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_rsqrt14sd_mask((__v2df)__B, (__v2df)__A, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_rsqrt14_sd(__mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_rsqrt14sd_mask( (__v2df)__B, (__v2df)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m128 _mm_rsqrt14_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_rsqrt14ss((__v4sf)__B, (__v4sf)__A); } __funline __m128 _mm_mask_rsqrt14_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_rsqrt14ss_mask((__v4sf)__B, (__v4sf)__A, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_rsqrt14_ss(__mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_rsqrt14ss_mask( (__v4sf)__B, (__v4sf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } #ifdef __OPTIMIZE__ __funline __m512d _mm512_sqrt_round_pd(__m512d __A, const int __R) { return (__m512d)__builtin_ia32_sqrtpd512_mask( (__v8df)__A, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, __R); } __funline __m512d _mm512_mask_sqrt_round_pd(__m512d __W, __mmask8 __U, __m512d __A, const int __R) { return (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)__A, (__v8df)__W, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_sqrt_round_pd(__mmask8 __U, __m512d __A, const int __R) { return (__m512d)__builtin_ia32_sqrtpd512_mask( (__v8df)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, __R); } __funline __m512 _mm512_sqrt_round_ps(__m512 __A, const int __R) { return (__m512)__builtin_ia32_sqrtps512_mask( (__v16sf)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, __R); } __funline __m512 _mm512_mask_sqrt_round_ps(__m512 __W, __mmask16 __U, __m512 __A, const int __R) { return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__A, (__v16sf)__W, (__mmask16)__U, __R); } __funline __m512 _mm512_maskz_sqrt_round_ps(__mmask16 __U, __m512 __A, const int __R) { return (__m512)__builtin_ia32_sqrtps512_mask( (__v16sf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, __R); } __funline __m128d _mm_sqrt_round_sd(__m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_sqrtsd_mask_round( (__v2df)__B, (__v2df)__A, (__v2df)_mm_setzero_pd(), (__mmask8)-1, __R); } __funline __m128d _mm_mask_sqrt_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_sqrtsd_mask_round( (__v2df)__B, (__v2df)__A, (__v2df)__W, 
(__mmask8)__U, __R); } __funline __m128d _mm_maskz_sqrt_round_sd(__mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_sqrtsd_mask_round( (__v2df)__B, (__v2df)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U, __R); } __funline __m128 _mm_sqrt_round_ss(__m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_sqrtss_mask_round( (__v4sf)__B, (__v4sf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)-1, __R); } __funline __m128 _mm_mask_sqrt_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_sqrtss_mask_round( (__v4sf)__B, (__v4sf)__A, (__v4sf)__W, (__mmask8)__U, __R); } __funline __m128 _mm_maskz_sqrt_round_ss(__mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_sqrtss_mask_round( (__v4sf)__B, (__v4sf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, __R); } #else #define _mm512_sqrt_round_pd(A, C) \ (__m512d) \ __builtin_ia32_sqrtpd512_mask(A, (__v8df)_mm512_undefined_pd(), -1, C) #define _mm512_mask_sqrt_round_pd(W, U, A, C) \ (__m512d) __builtin_ia32_sqrtpd512_mask(A, W, U, C) #define _mm512_maskz_sqrt_round_pd(U, A, C) \ (__m512d) __builtin_ia32_sqrtpd512_mask(A, (__v8df)_mm512_setzero_pd(), U, C) #define _mm512_sqrt_round_ps(A, C) \ (__m512) \ __builtin_ia32_sqrtps512_mask(A, (__v16sf)_mm512_undefined_ps(), -1, C) #define _mm512_mask_sqrt_round_ps(W, U, A, C) \ (__m512) __builtin_ia32_sqrtps512_mask(A, W, U, C) #define _mm512_maskz_sqrt_round_ps(U, A, C) \ (__m512) __builtin_ia32_sqrtps512_mask(A, (__v16sf)_mm512_setzero_ps(), U, C) #define _mm_sqrt_round_sd(A, B, C) \ (__m128d) \ __builtin_ia32_sqrtsd_mask_round(B, A, (__v2df)_mm_setzero_pd(), -1, C) #define _mm_mask_sqrt_round_sd(W, U, A, B, C) \ (__m128d) __builtin_ia32_sqrtsd_mask_round(B, A, W, U, C) #define _mm_maskz_sqrt_round_sd(U, A, B, C) \ (__m128d) \ __builtin_ia32_sqrtsd_mask_round(B, A, (__v2df)_mm_setzero_pd(), U, C) #define _mm_sqrt_round_ss(A, B, C) \ (__m128) \ __builtin_ia32_sqrtss_mask_round(B, A, (__v4sf)_mm_setzero_ps(), -1, C) #define _mm_mask_sqrt_round_ss(W, U, A, B, C) \ (__m128) __builtin_ia32_sqrtss_mask_round(B, A, W, U, C) #define _mm_maskz_sqrt_round_ss(U, A, B, C) \ (__m128) \ __builtin_ia32_sqrtss_mask_round(B, A, (__v4sf)_mm_setzero_ps(), U, C) #endif __funline __m512i _mm512_cvtepi8_epi32(__m128i __A) { return (__m512i)__builtin_ia32_pmovsxbd512_mask( (__v16qi)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_cvtepi8_epi32(__m512i __W, __mmask16 __U, __m128i __A) { return (__m512i)__builtin_ia32_pmovsxbd512_mask((__v16qi)__A, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_cvtepi8_epi32(__mmask16 __U, __m128i __A) { return (__m512i)__builtin_ia32_pmovsxbd512_mask( (__v16qi)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline __m512i _mm512_cvtepi8_epi64(__m128i __A) { return (__m512i)__builtin_ia32_pmovsxbq512_mask( (__v16qi)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_cvtepi8_epi64(__m512i __W, __mmask8 __U, __m128i __A) { return (__m512i)__builtin_ia32_pmovsxbq512_mask((__v16qi)__A, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) { return (__m512i)__builtin_ia32_pmovsxbq512_mask( (__v16qi)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } __funline __m512i _mm512_cvtepi16_epi32(__m256i __A) { return (__m512i)__builtin_ia32_pmovsxwd512_mask( (__v16hi)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline 
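/* Editorial note: throughout this header, the _mask_ forms blend results
   into __W wherever the write-mask __U has a 0 bit, while the _maskz_ forms
   zero those lanes; e.g. _mm512_maskz_cvtepi8_epi32(0x00FF, x) sign-extends
   the low eight bytes of a hypothetical x and zeroes the upper eight
   dwords. */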
__m512i _mm512_mask_cvtepi16_epi32(__m512i __W, __mmask16 __U, __m256i __A) { return (__m512i)__builtin_ia32_pmovsxwd512_mask((__v16hi)__A, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_cvtepi16_epi32(__mmask16 __U, __m256i __A) { return (__m512i)__builtin_ia32_pmovsxwd512_mask( (__v16hi)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline __m512i _mm512_cvtepi16_epi64(__m128i __A) { return (__m512i)__builtin_ia32_pmovsxwq512_mask( (__v8hi)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_cvtepi16_epi64(__m512i __W, __mmask8 __U, __m128i __A) { return (__m512i)__builtin_ia32_pmovsxwq512_mask((__v8hi)__A, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) { return (__m512i)__builtin_ia32_pmovsxwq512_mask( (__v8hi)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } __funline __m512i _mm512_cvtepi32_epi64(__m256i __X) { return (__m512i)__builtin_ia32_pmovsxdq512_mask( (__v8si)__X, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_cvtepi32_epi64(__m512i __W, __mmask8 __U, __m256i __X) { return (__m512i)__builtin_ia32_pmovsxdq512_mask((__v8si)__X, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_cvtepi32_epi64(__mmask8 __U, __m256i __X) { return (__m512i)__builtin_ia32_pmovsxdq512_mask( (__v8si)__X, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } __funline __m512i _mm512_cvtepu8_epi32(__m128i __A) { return (__m512i)__builtin_ia32_pmovzxbd512_mask( (__v16qi)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_cvtepu8_epi32(__m512i __W, __mmask16 __U, __m128i __A) { return (__m512i)__builtin_ia32_pmovzxbd512_mask((__v16qi)__A, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_cvtepu8_epi32(__mmask16 __U, __m128i __A) { return (__m512i)__builtin_ia32_pmovzxbd512_mask( (__v16qi)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline __m512i _mm512_cvtepu8_epi64(__m128i __A) { return (__m512i)__builtin_ia32_pmovzxbq512_mask( (__v16qi)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_cvtepu8_epi64(__m512i __W, __mmask8 __U, __m128i __A) { return (__m512i)__builtin_ia32_pmovzxbq512_mask((__v16qi)__A, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A) { return (__m512i)__builtin_ia32_pmovzxbq512_mask( (__v16qi)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } __funline __m512i _mm512_cvtepu16_epi32(__m256i __A) { return (__m512i)__builtin_ia32_pmovzxwd512_mask( (__v16hi)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_cvtepu16_epi32(__m512i __W, __mmask16 __U, __m256i __A) { return (__m512i)__builtin_ia32_pmovzxwd512_mask((__v16hi)__A, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_cvtepu16_epi32(__mmask16 __U, __m256i __A) { return (__m512i)__builtin_ia32_pmovzxwd512_mask( (__v16hi)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline __m512i _mm512_cvtepu16_epi64(__m128i __A) { return (__m512i)__builtin_ia32_pmovzxwq512_mask( (__v8hi)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_cvtepu16_epi64(__m512i __W, __mmask8 __U, __m128i __A) { return (__m512i)__builtin_ia32_pmovzxwq512_mask((__v8hi)__A, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) { return (__m512i)__builtin_ia32_pmovzxwq512_mask( (__v8hi)__A, 
(__v8di)_mm512_setzero_si512(), (__mmask8)__U); } __funline __m512i _mm512_cvtepu32_epi64(__m256i __X) { return (__m512i)__builtin_ia32_pmovzxdq512_mask( (__v8si)__X, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_cvtepu32_epi64(__m512i __W, __mmask8 __U, __m256i __X) { return (__m512i)__builtin_ia32_pmovzxdq512_mask((__v8si)__X, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_cvtepu32_epi64(__mmask8 __U, __m256i __X) { return (__m512i)__builtin_ia32_pmovzxdq512_mask( (__v8si)__X, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } #ifdef __OPTIMIZE__ __funline __m512d _mm512_add_round_pd(__m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_addpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, __R); } __funline __m512d _mm512_mask_add_round_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_addpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_add_round_pd(__mmask8 __U, __m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_addpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, __R); } __funline __m512 _mm512_add_round_ps(__m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_addps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, __R); } __funline __m512 _mm512_mask_add_round_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_addps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U, __R); } __funline __m512 _mm512_maskz_add_round_ps(__mmask16 __U, __m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_addps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, __R); } __funline __m512d _mm512_sub_round_pd(__m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_subpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, __R); } __funline __m512d _mm512_mask_sub_round_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_subpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_sub_round_pd(__mmask8 __U, __m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_subpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, __R); } __funline __m512 _mm512_sub_round_ps(__m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_subps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, __R); } __funline __m512 _mm512_mask_sub_round_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_subps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U, __R); } __funline __m512 _mm512_maskz_sub_round_ps(__mmask16 __U, __m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_subps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, __R); } #else #define _mm512_add_round_pd(A, B, C) \ (__m512d) \ __builtin_ia32_addpd512_mask(A, B, (__v8df)_mm512_undefined_pd(), -1, C) #define _mm512_mask_add_round_pd(W, U, A, B, C) \ (__m512d) __builtin_ia32_addpd512_mask(A, B, W, U, C) #define _mm512_maskz_add_round_pd(U, A, B, C) \ (__m512d) \ __builtin_ia32_addpd512_mask(A, B, 
(__v8df)_mm512_setzero_pd(), U, C) #define _mm512_add_round_ps(A, B, C) \ (__m512) __builtin_ia32_addps512_mask(A, B, (__v16sf)_mm512_undefined_ps(), \ -1, C) #define _mm512_mask_add_round_ps(W, U, A, B, C) \ (__m512) __builtin_ia32_addps512_mask(A, B, W, U, C) #define _mm512_maskz_add_round_ps(U, A, B, C) \ (__m512) \ __builtin_ia32_addps512_mask(A, B, (__v16sf)_mm512_setzero_ps(), U, C) #define _mm512_sub_round_pd(A, B, C) \ (__m512d) \ __builtin_ia32_subpd512_mask(A, B, (__v8df)_mm512_undefined_pd(), -1, C) #define _mm512_mask_sub_round_pd(W, U, A, B, C) \ (__m512d) __builtin_ia32_subpd512_mask(A, B, W, U, C) #define _mm512_maskz_sub_round_pd(U, A, B, C) \ (__m512d) \ __builtin_ia32_subpd512_mask(A, B, (__v8df)_mm512_setzero_pd(), U, C) #define _mm512_sub_round_ps(A, B, C) \ (__m512) __builtin_ia32_subps512_mask(A, B, (__v16sf)_mm512_undefined_ps(), \ -1, C) #define _mm512_mask_sub_round_ps(W, U, A, B, C) \ (__m512) __builtin_ia32_subps512_mask(A, B, W, U, C) #define _mm512_maskz_sub_round_ps(U, A, B, C) \ (__m512) \ __builtin_ia32_subps512_mask(A, B, (__v16sf)_mm512_setzero_ps(), U, C) #endif #ifdef __OPTIMIZE__ __funline __m512d _mm512_mul_round_pd(__m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_mulpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, __R); } __funline __m512d _mm512_mask_mul_round_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_mulpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_mul_round_pd(__mmask8 __U, __m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_mulpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, __R); } __funline __m512 _mm512_mul_round_ps(__m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_mulps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, __R); } __funline __m512 _mm512_mask_mul_round_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_mulps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U, __R); } __funline __m512 _mm512_maskz_mul_round_ps(__mmask16 __U, __m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_mulps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, __R); } __funline __m512d _mm512_div_round_pd(__m512d __M, __m512d __V, const int __R) { return (__m512d)__builtin_ia32_divpd512_mask((__v8df)__M, (__v8df)__V, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, __R); } __funline __m512d _mm512_mask_div_round_pd(__m512d __W, __mmask8 __U, __m512d __M, __m512d __V, const int __R) { return (__m512d)__builtin_ia32_divpd512_mask((__v8df)__M, (__v8df)__V, (__v8df)__W, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_div_round_pd(__mmask8 __U, __m512d __M, __m512d __V, const int __R) { return (__m512d)__builtin_ia32_divpd512_mask((__v8df)__M, (__v8df)__V, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, __R); } __funline __m512 _mm512_div_round_ps(__m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_divps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, __R); } __funline __m512 _mm512_mask_div_round_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_divps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U, __R); } __funline __m512 
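/* Editorial note: the #ifdef __OPTIMIZE__ splits exist because, without
   optimization, these wrappers are real calls and a const int parameter is
   no longer a compile-time constant; the #else macro forms keep the
   rounding/imm operand visible to the builtin, which requires an
   immediate. */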
_mm512_maskz_div_round_ps(__mmask16 __U, __m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_divps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, __R); } __funline __m128d _mm_mul_round_sd(__m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_mulsd_round((__v2df)__A, (__v2df)__B, __R); } __funline __m128d _mm_mask_mul_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_mulsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U, __R); } __funline __m128d _mm_maskz_mul_round_sd(__mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_mulsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, __R); } __funline __m128 _mm_mul_round_ss(__m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_mulss_round((__v4sf)__A, (__v4sf)__B, __R); } __funline __m128 _mm_mask_mul_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_mulss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U, __R); } __funline __m128 _mm_maskz_mul_round_ss(__mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_mulss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, __R); } __funline __m128d _mm_div_round_sd(__m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_divsd_round((__v2df)__A, (__v2df)__B, __R); } __funline __m128d _mm_mask_div_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_divsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U, __R); } __funline __m128d _mm_maskz_div_round_sd(__mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_divsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, __R); } __funline __m128 _mm_div_round_ss(__m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_divss_round((__v4sf)__A, (__v4sf)__B, __R); } __funline __m128 _mm_mask_div_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_divss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U, __R); } __funline __m128 _mm_maskz_div_round_ss(__mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_divss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, __R); } #else #define _mm512_mul_round_pd(A, B, C) \ (__m512d) \ __builtin_ia32_mulpd512_mask(A, B, (__v8df)_mm512_undefined_pd(), -1, C) #define _mm512_mask_mul_round_pd(W, U, A, B, C) \ (__m512d) __builtin_ia32_mulpd512_mask(A, B, W, U, C) #define _mm512_maskz_mul_round_pd(U, A, B, C) \ (__m512d) \ __builtin_ia32_mulpd512_mask(A, B, (__v8df)_mm512_setzero_pd(), U, C) #define _mm512_mul_round_ps(A, B, C) \ (__m512) __builtin_ia32_mulps512_mask(A, B, (__v16sf)_mm512_undefined_ps(), \ -1, C) #define _mm512_mask_mul_round_ps(W, U, A, B, C) \ (__m512) __builtin_ia32_mulps512_mask(A, B, W, U, C) #define _mm512_maskz_mul_round_ps(U, A, B, C) \ (__m512) \ __builtin_ia32_mulps512_mask(A, B, (__v16sf)_mm512_setzero_ps(), U, C) #define _mm512_div_round_pd(A, B, C) \ (__m512d) \ __builtin_ia32_divpd512_mask(A, B, (__v8df)_mm512_undefined_pd(), -1, C) #define _mm512_mask_div_round_pd(W, U, A, B, C) \ (__m512d) __builtin_ia32_divpd512_mask(A, B, W, U, C) #define 
_mm512_maskz_div_round_pd(U, A, B, C) \ (__m512d) \ __builtin_ia32_divpd512_mask(A, B, (__v8df)_mm512_setzero_pd(), U, C) #define _mm512_div_round_ps(A, B, C) \ (__m512) __builtin_ia32_divps512_mask(A, B, (__v16sf)_mm512_undefined_ps(), \ -1, C) #define _mm512_mask_div_round_ps(W, U, A, B, C) \ (__m512) __builtin_ia32_divps512_mask(A, B, W, U, C) #define _mm512_maskz_div_round_ps(U, A, B, C) \ (__m512) \ __builtin_ia32_divps512_mask(A, B, (__v16sf)_mm512_setzero_ps(), U, C) #define _mm_mul_round_sd(A, B, C) (__m128d) __builtin_ia32_mulsd_round(A, B, C) #define _mm_mask_mul_round_sd(W, U, A, B, C) \ (__m128d) __builtin_ia32_mulsd_mask_round(A, B, W, U, C) #define _mm_maskz_mul_round_sd(U, A, B, C) \ (__m128d) \ __builtin_ia32_mulsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C) #define _mm_mul_round_ss(A, B, C) (__m128) __builtin_ia32_mulss_round(A, B, C) #define _mm_mask_mul_round_ss(W, U, A, B, C) \ (__m128) __builtin_ia32_mulss_mask_round(A, B, W, U, C) #define _mm_maskz_mul_round_ss(U, A, B, C) \ (__m128) __builtin_ia32_mulss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C) #define _mm_div_round_sd(A, B, C) (__m128d) __builtin_ia32_divsd_round(A, B, C) #define _mm_mask_div_round_sd(W, U, A, B, C) \ (__m128d) __builtin_ia32_divsd_mask_round(A, B, W, U, C) #define _mm_maskz_div_round_sd(U, A, B, C) \ (__m128d) \ __builtin_ia32_divsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C) #define _mm_div_round_ss(A, B, C) (__m128) __builtin_ia32_divss_round(A, B, C) #define _mm_mask_div_round_ss(W, U, A, B, C) \ (__m128) __builtin_ia32_divss_mask_round(A, B, W, U, C) #define _mm_maskz_div_round_ss(U, A, B, C) \ (__m128) __builtin_ia32_divss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C) #endif #ifdef __OPTIMIZE__ __funline __m512d _mm512_max_round_pd(__m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_maxpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, __R); } __funline __m512d _mm512_mask_max_round_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_maxpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_max_round_pd(__mmask8 __U, __m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_maxpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, __R); } __funline __m512 _mm512_max_round_ps(__m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_maxps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, __R); } __funline __m512 _mm512_mask_max_round_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_maxps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U, __R); } __funline __m512 _mm512_maskz_max_round_ps(__mmask16 __U, __m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_maxps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, __R); } __funline __m512d _mm512_min_round_pd(__m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_minpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, __R); } __funline __m512d _mm512_mask_min_round_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_minpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_min_round_pd(__mmask8 __U, __m512d __A, __m512d __B, const 
int __R) { return (__m512d)__builtin_ia32_minpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, __R); } __funline __m512 _mm512_min_round_ps(__m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_minps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, __R); } __funline __m512 _mm512_mask_min_round_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_minps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U, __R); } __funline __m512 _mm512_maskz_min_round_ps(__mmask16 __U, __m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_minps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, __R); } #else #define _mm512_max_round_pd(A, B, R) \ (__m512d) \ __builtin_ia32_maxpd512_mask(A, B, (__v8df)_mm512_undefined_pd(), -1, R) #define _mm512_mask_max_round_pd(W, U, A, B, R) \ (__m512d) __builtin_ia32_maxpd512_mask(A, B, W, U, R) #define _mm512_maskz_max_round_pd(U, A, B, R) \ (__m512d) \ __builtin_ia32_maxpd512_mask(A, B, (__v8df)_mm512_setzero_pd(), U, R) #define _mm512_max_round_ps(A, B, R) \ (__m512) __builtin_ia32_maxps512_mask(A, B, (__v16sf)_mm512_undefined_pd(), \ -1, R) #define _mm512_mask_max_round_ps(W, U, A, B, R) \ (__m512) __builtin_ia32_maxps512_mask(A, B, W, U, R) #define _mm512_maskz_max_round_ps(U, A, B, R) \ (__m512) \ __builtin_ia32_maxps512_mask(A, B, (__v16sf)_mm512_setzero_ps(), U, R) #define _mm512_min_round_pd(A, B, R) \ (__m512d) \ __builtin_ia32_minpd512_mask(A, B, (__v8df)_mm512_undefined_pd(), -1, R) #define _mm512_mask_min_round_pd(W, U, A, B, R) \ (__m512d) __builtin_ia32_minpd512_mask(A, B, W, U, R) #define _mm512_maskz_min_round_pd(U, A, B, R) \ (__m512d) \ __builtin_ia32_minpd512_mask(A, B, (__v8df)_mm512_setzero_pd(), U, R) #define _mm512_min_round_ps(A, B, R) \ (__m512) __builtin_ia32_minps512_mask(A, B, (__v16sf)_mm512_undefined_ps(), \ -1, R) #define _mm512_mask_min_round_ps(W, U, A, B, R) \ (__m512) __builtin_ia32_minps512_mask(A, B, W, U, R) #define _mm512_maskz_min_round_ps(U, A, B, R) \ (__m512) \ __builtin_ia32_minps512_mask(A, B, (__v16sf)_mm512_setzero_ps(), U, R) #endif #ifdef __OPTIMIZE__ __funline __m512d _mm512_scalef_round_pd(__m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, __R); } __funline __m512d _mm512_mask_scalef_round_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_scalefpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_scalef_round_pd(__mmask8 __U, __m512d __A, __m512d __B, const int __R) { return (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, __R); } __funline __m512 _mm512_scalef_round_ps(__m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_scalefps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, __R); } __funline __m512 _mm512_mask_scalef_round_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_scalefps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U, __R); } __funline __m512 _mm512_maskz_scalef_round_ps(__mmask16 __U, __m512 __A, __m512 __B, const int __R) { return (__m512)__builtin_ia32_scalefps512_mask((__v16sf)__A, (__v16sf)__B, 
__funline __m128d _mm_scalef_round_sd(__m128d __A, __m128d __B, const int __R) {
  return (__m128d)__builtin_ia32_scalefsd_mask_round(
      (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)-1, __R);
}

__funline __m128d _mm_mask_scalef_round_sd(__m128d __W, __mmask8 __U,
                                           __m128d __A, __m128d __B,
                                           const int __R) {
  return (__m128d)__builtin_ia32_scalefsd_mask_round(
      (__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U, __R);
}

__funline __m128d _mm_maskz_scalef_round_sd(__mmask8 __U, __m128d __A,
                                            __m128d __B, const int __R) {
  return (__m128d)__builtin_ia32_scalefsd_mask_round(
      (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, __R);
}

__funline __m128 _mm_scalef_round_ss(__m128 __A, __m128 __B, const int __R) {
  return (__m128)__builtin_ia32_scalefss_mask_round(
      (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)-1, __R);
}

__funline __m128 _mm_mask_scalef_round_ss(__m128 __W, __mmask8 __U, __m128 __A,
                                          __m128 __B, const int __R) {
  return (__m128)__builtin_ia32_scalefss_mask_round(
      (__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U, __R);
}

__funline __m128 _mm_maskz_scalef_round_ss(__mmask8 __U, __m128 __A, __m128 __B,
                                           const int __R) {
  return (__m128)__builtin_ia32_scalefss_mask_round(
      (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, __R);
}

#else
#define _mm512_scalef_round_pd(A, B, C) \
  (__m512d) __builtin_ia32_scalefpd512_mask( \
      A, B, (__v8df)_mm512_undefined_pd(), -1, C)
#define _mm512_mask_scalef_round_pd(W, U, A, B, C) \
  (__m512d) __builtin_ia32_scalefpd512_mask(A, B, W, U, C)
#define _mm512_maskz_scalef_round_pd(U, A, B, C) \
  (__m512d) \
  __builtin_ia32_scalefpd512_mask(A, B, (__v8df)_mm512_setzero_pd(), U, C)
#define _mm512_scalef_round_ps(A, B, C) \
  (__m512) __builtin_ia32_scalefps512_mask( \
      A, B, (__v16sf)_mm512_undefined_ps(), -1, C)
#define _mm512_mask_scalef_round_ps(W, U, A, B, C) \
  (__m512) __builtin_ia32_scalefps512_mask(A, B, W, U, C)
#define _mm512_maskz_scalef_round_ps(U, A, B, C) \
  (__m512) __builtin_ia32_scalefps512_mask(A, B, (__v16sf)_mm512_setzero_ps(), \
                                           U, C)
#define _mm_scalef_round_sd(A, B, C) \
  (__m128d) __builtin_ia32_scalefsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), \
                                               -1, C)
#define _mm_scalef_round_ss(A, B, C) \
  (__m128) __builtin_ia32_scalefss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), \
                                              -1, C)
/* Non-__OPTIMIZE__ spellings of the masked scalar forms, mirroring the
   inline definitions above so both branches expose the same API. */
#define _mm_mask_scalef_round_sd(W, U, A, B, C) \
  (__m128d) __builtin_ia32_scalefsd_mask_round(A, B, W, U, C)
#define _mm_maskz_scalef_round_sd(U, A, B, C) \
  (__m128d) \
  __builtin_ia32_scalefsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
#define _mm_mask_scalef_round_ss(W, U, A, B, C) \
  (__m128) __builtin_ia32_scalefss_mask_round(A, B, W, U, C)
#define _mm_maskz_scalef_round_ss(U, A, B, C) \
  (__m128) \
  __builtin_ia32_scalefss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
#endif

#ifdef __OPTIMIZE__
__funline __m512d _mm512_fmadd_round_pd(__m512d __A, __m512d __B, __m512d __C,
                                        const int __R) {
  return (__m512d)__builtin_ia32_vfmaddpd512_mask(
      (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)-1, __R);
}

__funline __m512d _mm512_mask_fmadd_round_pd(__m512d __A, __mmask8 __U,
                                             __m512d __B, __m512d __C,
                                             const int __R) {
  return (__m512d)__builtin_ia32_vfmaddpd512_mask(
      (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R);
}

__funline __m512d _mm512_mask3_fmadd_round_pd(__m512d __A, __m512d __B,
                                              __m512d __C, __mmask8 __U,
                                              const int __R) {
  return (__m512d)__builtin_ia32_vfmaddpd512_mask3(
      (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R);
}

__funline __m512d _mm512_maskz_fmadd_round_pd(__mmask8 __U, __m512d __A,
                                              __m512d __B, __m512d __C,
                                              const int __R) {
  return (__m512d)__builtin_ia32_vfmaddpd512_maskz(
      (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R);
}

__funline __m512 _mm512_fmadd_round_ps(__m512 __A, __m512 __B, __m512 __C,
                                       const int __R) {
  return (__m512)__builtin_ia32_vfmaddps512_mask(
      (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)-1, __R);
}

__funline __m512 _mm512_mask_fmadd_round_ps(__m512 __A,
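/* In the fused multiply family below, _mask keeps lanes of the first
   operand where a mask bit is clear, _mask3 keeps lanes of the third
   (addend) operand, and _maskz zeroes them; e.g.
   _mm512_mask_fmadd_round_pd(a, m, b, c, R) yields a[i]*b[i]+c[i]
   where bit i of m is set, else a[i]. */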
__mmask16 __U, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfmaddps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } __funline __m512 _mm512_mask3_fmadd_round_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U, const int __R) { return (__m512)__builtin_ia32_vfmaddps512_mask3( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } __funline __m512 _mm512_maskz_fmadd_round_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfmaddps512_maskz( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } __funline __m512d _mm512_fmsub_round_pd(__m512d __A, __m512d __B, __m512d __C, const int __R) { return (__m512d)__builtin_ia32_vfmsubpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)-1, __R); } __funline __m512d _mm512_mask_fmsub_round_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C, const int __R) { return (__m512d)__builtin_ia32_vfmsubpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R); } __funline __m512d _mm512_mask3_fmsub_round_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U, const int __R) { return (__m512d)__builtin_ia32_vfmsubpd512_mask3( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_fmsub_round_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C, const int __R) { return (__m512d)__builtin_ia32_vfmsubpd512_maskz( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R); } __funline __m512 _mm512_fmsub_round_ps(__m512 __A, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfmsubps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)-1, __R); } __funline __m512 _mm512_mask_fmsub_round_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfmsubps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } __funline __m512 _mm512_mask3_fmsub_round_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U, const int __R) { return (__m512)__builtin_ia32_vfmsubps512_mask3( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } __funline __m512 _mm512_maskz_fmsub_round_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfmsubps512_maskz( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } __funline __m512d _mm512_fmaddsub_round_pd(__m512d __A, __m512d __B, __m512d __C, const int __R) { return (__m512d)__builtin_ia32_vfmaddsubpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)-1, __R); } __funline __m512d _mm512_mask_fmaddsub_round_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C, const int __R) { return (__m512d)__builtin_ia32_vfmaddsubpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R); } __funline __m512d _mm512_mask3_fmaddsub_round_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U, const int __R) { return (__m512d)__builtin_ia32_vfmaddsubpd512_mask3( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_fmaddsub_round_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C, const int __R) { return (__m512d)__builtin_ia32_vfmaddsubpd512_maskz( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R); } __funline __m512 _mm512_fmaddsub_round_ps(__m512 __A, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfmaddsubps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)-1, __R); } __funline __m512 
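/* fmaddsub subtracts the addend in even-indexed lanes and adds it in
   odd-indexed lanes (lane 0 is a*b-c); fmsubadd is the opposite
   pairing, which is why it is built from fmaddsub with a negated
   third operand below. */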
_mm512_mask_fmaddsub_round_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfmaddsubps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } __funline __m512 _mm512_mask3_fmaddsub_round_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U, const int __R) { return (__m512)__builtin_ia32_vfmaddsubps512_mask3( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } __funline __m512 _mm512_maskz_fmaddsub_round_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfmaddsubps512_maskz( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } __funline __m512d _mm512_fmsubadd_round_pd(__m512d __A, __m512d __B, __m512d __C, const int __R) { return (__m512d)__builtin_ia32_vfmaddsubpd512_mask( (__v8df)__A, (__v8df)__B, -(__v8df)__C, (__mmask8)-1, __R); } __funline __m512d _mm512_mask_fmsubadd_round_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C, const int __R) { return (__m512d)__builtin_ia32_vfmaddsubpd512_mask( (__v8df)__A, (__v8df)__B, -(__v8df)__C, (__mmask8)__U, __R); } __funline __m512d _mm512_mask3_fmsubadd_round_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U, const int __R) { return (__m512d)__builtin_ia32_vfmsubaddpd512_mask3( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_fmsubadd_round_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C, const int __R) { return (__m512d)__builtin_ia32_vfmaddsubpd512_maskz( (__v8df)__A, (__v8df)__B, -(__v8df)__C, (__mmask8)__U, __R); } __funline __m512 _mm512_fmsubadd_round_ps(__m512 __A, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfmaddsubps512_mask( (__v16sf)__A, (__v16sf)__B, -(__v16sf)__C, (__mmask16)-1, __R); } __funline __m512 _mm512_mask_fmsubadd_round_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfmaddsubps512_mask( (__v16sf)__A, (__v16sf)__B, -(__v16sf)__C, (__mmask16)__U, __R); } __funline __m512 _mm512_mask3_fmsubadd_round_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U, const int __R) { return (__m512)__builtin_ia32_vfmsubaddps512_mask3( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } __funline __m512 _mm512_maskz_fmsubadd_round_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfmaddsubps512_maskz( (__v16sf)__A, (__v16sf)__B, -(__v16sf)__C, (__mmask16)__U, __R); } __funline __m512d _mm512_fnmadd_round_pd(__m512d __A, __m512d __B, __m512d __C, const int __R) { return (__m512d)__builtin_ia32_vfnmaddpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)-1, __R); } __funline __m512d _mm512_mask_fnmadd_round_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C, const int __R) { return (__m512d)__builtin_ia32_vfnmaddpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R); } __funline __m512d _mm512_mask3_fnmadd_round_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U, const int __R) { return (__m512d)__builtin_ia32_vfnmaddpd512_mask3( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_fnmadd_round_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C, const int __R) { return (__m512d)__builtin_ia32_vfnmaddpd512_maskz( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R); } __funline __m512 _mm512_fnmadd_round_ps(__m512 __A, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfnmaddps512_mask( 
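/* fnmadd computes -(a*b)+c per lane and fnmsub computes -(a*b)-c. */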
(__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)-1, __R); } __funline __m512 _mm512_mask_fnmadd_round_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfnmaddps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } __funline __m512 _mm512_mask3_fnmadd_round_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U, const int __R) { return (__m512)__builtin_ia32_vfnmaddps512_mask3( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } __funline __m512 _mm512_maskz_fnmadd_round_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfnmaddps512_maskz( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } __funline __m512d _mm512_fnmsub_round_pd(__m512d __A, __m512d __B, __m512d __C, const int __R) { return (__m512d)__builtin_ia32_vfnmsubpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)-1, __R); } __funline __m512d _mm512_mask_fnmsub_round_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C, const int __R) { return (__m512d)__builtin_ia32_vfnmsubpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R); } __funline __m512d _mm512_mask3_fnmsub_round_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U, const int __R) { return (__m512d)__builtin_ia32_vfnmsubpd512_mask3( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R); } __funline __m512d _mm512_maskz_fnmsub_round_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C, const int __R) { return (__m512d)__builtin_ia32_vfnmsubpd512_maskz( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, __R); } __funline __m512 _mm512_fnmsub_round_ps(__m512 __A, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfnmsubps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)-1, __R); } __funline __m512 _mm512_mask_fnmsub_round_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfnmsubps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } __funline __m512 _mm512_mask3_fnmsub_round_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U, const int __R) { return (__m512)__builtin_ia32_vfnmsubps512_mask3( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } __funline __m512 _mm512_maskz_fnmsub_round_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C, const int __R) { return (__m512)__builtin_ia32_vfnmsubps512_maskz( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, __R); } #else #define _mm512_fmadd_round_pd(A, B, C, R) \ (__m512d) __builtin_ia32_vfmaddpd512_mask(A, B, C, -1, R) #define _mm512_mask_fmadd_round_pd(A, U, B, C, R) \ (__m512d) __builtin_ia32_vfmaddpd512_mask(A, B, C, U, R) #define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) \ (__m512d) __builtin_ia32_vfmaddpd512_mask3(A, B, C, U, R) #define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) \ (__m512d) __builtin_ia32_vfmaddpd512_maskz(A, B, C, U, R) #define _mm512_fmadd_round_ps(A, B, C, R) \ (__m512) __builtin_ia32_vfmaddps512_mask(A, B, C, -1, R) #define _mm512_mask_fmadd_round_ps(A, U, B, C, R) \ (__m512) __builtin_ia32_vfmaddps512_mask(A, B, C, U, R) #define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) \ (__m512) __builtin_ia32_vfmaddps512_mask3(A, B, C, U, R) #define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) \ (__m512) __builtin_ia32_vfmaddps512_maskz(A, B, C, U, R) #define _mm512_fmsub_round_pd(A, B, C, R) \ (__m512d) __builtin_ia32_vfmsubpd512_mask(A, B, C, -1, R) #define _mm512_mask_fmsub_round_pd(A, U, 
B, C, R) \ (__m512d) __builtin_ia32_vfmsubpd512_mask(A, B, C, U, R) #define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) \ (__m512d) __builtin_ia32_vfmsubpd512_mask3(A, B, C, U, R) #define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) \ (__m512d) __builtin_ia32_vfmsubpd512_maskz(A, B, C, U, R) #define _mm512_fmsub_round_ps(A, B, C, R) \ (__m512) __builtin_ia32_vfmsubps512_mask(A, B, C, -1, R) #define _mm512_mask_fmsub_round_ps(A, U, B, C, R) \ (__m512) __builtin_ia32_vfmsubps512_mask(A, B, C, U, R) #define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) \ (__m512) __builtin_ia32_vfmsubps512_mask3(A, B, C, U, R) #define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) \ (__m512) __builtin_ia32_vfmsubps512_maskz(A, B, C, U, R) #define _mm512_fmaddsub_round_pd(A, B, C, R) \ (__m512d) __builtin_ia32_vfmaddsubpd512_mask(A, B, C, -1, R) #define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) \ (__m512d) __builtin_ia32_vfmaddsubpd512_mask(A, B, C, U, R) #define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) \ (__m512d) __builtin_ia32_vfmaddsubpd512_mask3(A, B, C, U, R) #define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) \ (__m512d) __builtin_ia32_vfmaddsubpd512_maskz(A, B, C, U, R) #define _mm512_fmaddsub_round_ps(A, B, C, R) \ (__m512) __builtin_ia32_vfmaddsubps512_mask(A, B, C, -1, R) #define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) \ (__m512) __builtin_ia32_vfmaddsubps512_mask(A, B, C, U, R) #define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) \ (__m512) __builtin_ia32_vfmaddsubps512_mask3(A, B, C, U, R) #define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) \ (__m512) __builtin_ia32_vfmaddsubps512_maskz(A, B, C, U, R) #define _mm512_fmsubadd_round_pd(A, B, C, R) \ (__m512d) __builtin_ia32_vfmaddsubpd512_mask(A, B, -(C), -1, R) #define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) \ (__m512d) __builtin_ia32_vfmaddsubpd512_mask(A, B, -(C), U, R) #define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) \ (__m512d) __builtin_ia32_vfmsubaddpd512_mask3(A, B, C, U, R) #define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) \ (__m512d) __builtin_ia32_vfmaddsubpd512_maskz(A, B, -(C), U, R) #define _mm512_fmsubadd_round_ps(A, B, C, R) \ (__m512) __builtin_ia32_vfmaddsubps512_mask(A, B, -(C), -1, R) #define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) \ (__m512) __builtin_ia32_vfmaddsubps512_mask(A, B, -(C), U, R) #define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) \ (__m512) __builtin_ia32_vfmsubaddps512_mask3(A, B, C, U, R) #define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) \ (__m512) __builtin_ia32_vfmaddsubps512_maskz(A, B, -(C), U, R) #define _mm512_fnmadd_round_pd(A, B, C, R) \ (__m512d) __builtin_ia32_vfnmaddpd512_mask(A, B, C, -1, R) #define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) \ (__m512d) __builtin_ia32_vfnmaddpd512_mask(A, B, C, U, R) #define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) \ (__m512d) __builtin_ia32_vfnmaddpd512_mask3(A, B, C, U, R) #define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) \ (__m512d) __builtin_ia32_vfnmaddpd512_maskz(A, B, C, U, R) #define _mm512_fnmadd_round_ps(A, B, C, R) \ (__m512) __builtin_ia32_vfnmaddps512_mask(A, B, C, -1, R) #define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) \ (__m512) __builtin_ia32_vfnmaddps512_mask(A, B, C, U, R) #define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) \ (__m512) __builtin_ia32_vfnmaddps512_mask3(A, B, C, U, R) #define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) \ (__m512) __builtin_ia32_vfnmaddps512_maskz(A, B, C, U, R) #define _mm512_fnmsub_round_pd(A, B, C, R) \ (__m512d) __builtin_ia32_vfnmsubpd512_mask(A, B, C, -1, R) #define 
_mm512_mask_fnmsub_round_pd(A, U, B, C, R) \ (__m512d) __builtin_ia32_vfnmsubpd512_mask(A, B, C, U, R) #define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) \ (__m512d) __builtin_ia32_vfnmsubpd512_mask3(A, B, C, U, R) #define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) \ (__m512d) __builtin_ia32_vfnmsubpd512_maskz(A, B, C, U, R) #define _mm512_fnmsub_round_ps(A, B, C, R) \ (__m512) __builtin_ia32_vfnmsubps512_mask(A, B, C, -1, R) #define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) \ (__m512) __builtin_ia32_vfnmsubps512_mask(A, B, C, U, R) #define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) \ (__m512) __builtin_ia32_vfnmsubps512_mask3(A, B, C, U, R) #define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) \ (__m512) __builtin_ia32_vfnmsubps512_maskz(A, B, C, U, R) #endif __funline __m512i _mm512_abs_epi64(__m512i __A) { return (__m512i)__builtin_ia32_pabsq512_mask( (__v8di)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_abs_epi64(__m512i __W, __mmask8 __U, __m512i __A) { return (__m512i)__builtin_ia32_pabsq512_mask((__v8di)__A, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_abs_epi64(__mmask8 __U, __m512i __A) { return (__m512i)__builtin_ia32_pabsq512_mask( (__v8di)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } __funline __m512i _mm512_abs_epi32(__m512i __A) { return (__m512i)__builtin_ia32_pabsd512_mask( (__v16si)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_abs_epi32(__m512i __W, __mmask16 __U, __m512i __A) { return (__m512i)__builtin_ia32_pabsd512_mask((__v16si)__A, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_abs_epi32(__mmask16 __U, __m512i __A) { return (__m512i)__builtin_ia32_pabsd512_mask( (__v16si)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline __m512 _mm512_broadcastss_ps(__m128 __A) { return (__m512)__builtin_ia32_broadcastss512( (__v4sf)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1); } __funline __m512 _mm512_mask_broadcastss_ps(__m512 __O, __mmask16 __M, __m128 __A) { return (__m512)__builtin_ia32_broadcastss512((__v4sf)__A, (__v16sf)__O, __M); } __funline __m512 _mm512_maskz_broadcastss_ps(__mmask16 __M, __m128 __A) { return (__m512)__builtin_ia32_broadcastss512( (__v4sf)__A, (__v16sf)_mm512_setzero_ps(), __M); } __funline __m512d _mm512_broadcastsd_pd(__m128d __A) { return (__m512d)__builtin_ia32_broadcastsd512( (__v2df)__A, (__v8df)_mm512_undefined_pd(), (__mmask8)-1); } __funline __m512d _mm512_mask_broadcastsd_pd(__m512d __O, __mmask8 __M, __m128d __A) { return (__m512d)__builtin_ia32_broadcastsd512((__v2df)__A, (__v8df)__O, __M); } __funline __m512d _mm512_maskz_broadcastsd_pd(__mmask8 __M, __m128d __A) { return (__m512d)__builtin_ia32_broadcastsd512( (__v2df)__A, (__v8df)_mm512_setzero_pd(), __M); } __funline __m512i _mm512_broadcastd_epi32(__m128i __A) { return (__m512i)__builtin_ia32_pbroadcastd512( (__v4si)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_broadcastd_epi32(__m512i __O, __mmask16 __M, __m128i __A) { return (__m512i)__builtin_ia32_pbroadcastd512((__v4si)__A, (__v16si)__O, __M); } __funline __m512i _mm512_maskz_broadcastd_epi32(__mmask16 __M, __m128i __A) { return (__m512i)__builtin_ia32_pbroadcastd512( (__v4si)__A, (__v16si)_mm512_setzero_si512(), __M); } __funline __m512i _mm512_set1_epi32(int __A) { return (__m512i)__builtin_ia32_pbroadcastd512_gpr_mask( __A, (__v16si)_mm512_undefined_epi32(), (__mmask16)(-1)); } __funline __m512i _mm512_mask_set1_epi32(__m512i __O, __mmask16 
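/* _mm512_set1_epi32() broadcasts a scalar taken from a general
   register, while _mm512_broadcastd_epi32() broadcasts the low dword
   of a vector; e.g. _mm512_set1_epi32(7) yields sixteen lanes of 7. */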
__M, int __A) { return (__m512i)__builtin_ia32_pbroadcastd512_gpr_mask(__A, (__v16si)__O, __M); } __funline __m512i _mm512_maskz_set1_epi32(__mmask16 __M, int __A) { return (__m512i)__builtin_ia32_pbroadcastd512_gpr_mask( __A, (__v16si)_mm512_setzero_si512(), __M); } __funline __m512i _mm512_broadcastq_epi64(__m128i __A) { return (__m512i)__builtin_ia32_pbroadcastq512( (__v2di)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_broadcastq_epi64(__m512i __O, __mmask8 __M, __m128i __A) { return (__m512i)__builtin_ia32_pbroadcastq512((__v2di)__A, (__v8di)__O, __M); } __funline __m512i _mm512_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A) { return (__m512i)__builtin_ia32_pbroadcastq512( (__v2di)__A, (__v8di)_mm512_setzero_si512(), __M); } __funline __m512i _mm512_set1_epi64(long long __A) { return (__m512i)__builtin_ia32_pbroadcastq512_gpr_mask( __A, (__v8di)_mm512_undefined_epi32(), (__mmask8)(-1)); } __funline __m512i _mm512_mask_set1_epi64(__m512i __O, __mmask8 __M, long long __A) { return (__m512i)__builtin_ia32_pbroadcastq512_gpr_mask(__A, (__v8di)__O, __M); } __funline __m512i _mm512_maskz_set1_epi64(__mmask8 __M, long long __A) { return (__m512i)__builtin_ia32_pbroadcastq512_gpr_mask( __A, (__v8di)_mm512_setzero_si512(), __M); } __funline __m512 _mm512_broadcast_f32x4(__m128 __A) { return (__m512)__builtin_ia32_broadcastf32x4_512( (__v4sf)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1); } __funline __m512 _mm512_mask_broadcast_f32x4(__m512 __O, __mmask16 __M, __m128 __A) { return (__m512)__builtin_ia32_broadcastf32x4_512((__v4sf)__A, (__v16sf)__O, __M); } __funline __m512 _mm512_maskz_broadcast_f32x4(__mmask16 __M, __m128 __A) { return (__m512)__builtin_ia32_broadcastf32x4_512( (__v4sf)__A, (__v16sf)_mm512_setzero_ps(), __M); } __funline __m512i _mm512_broadcast_i32x4(__m128i __A) { return (__m512i)__builtin_ia32_broadcasti32x4_512( (__v4si)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_broadcast_i32x4(__m512i __O, __mmask16 __M, __m128i __A) { return (__m512i)__builtin_ia32_broadcasti32x4_512((__v4si)__A, (__v16si)__O, __M); } __funline __m512i _mm512_maskz_broadcast_i32x4(__mmask16 __M, __m128i __A) { return (__m512i)__builtin_ia32_broadcasti32x4_512( (__v4si)__A, (__v16si)_mm512_setzero_si512(), __M); } __funline __m512d _mm512_broadcast_f64x4(__m256d __A) { return (__m512d)__builtin_ia32_broadcastf64x4_512( (__v4df)__A, (__v8df)_mm512_undefined_pd(), (__mmask8)-1); } __funline __m512d _mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, __m256d __A) { return (__m512d)__builtin_ia32_broadcastf64x4_512((__v4df)__A, (__v8df)__O, __M); } __funline __m512d _mm512_maskz_broadcast_f64x4(__mmask8 __M, __m256d __A) { return (__m512d)__builtin_ia32_broadcastf64x4_512( (__v4df)__A, (__v8df)_mm512_setzero_pd(), __M); } __funline __m512i _mm512_broadcast_i64x4(__m256i __A) { return (__m512i)__builtin_ia32_broadcasti64x4_512( (__v4di)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_broadcast_i64x4(__m512i __O, __mmask8 __M, __m256i __A) { return (__m512i)__builtin_ia32_broadcasti64x4_512((__v4di)__A, (__v8di)__O, __M); } __funline __m512i _mm512_maskz_broadcast_i64x4(__mmask8 __M, __m256i __A) { return (__m512i)__builtin_ia32_broadcasti64x4_512( (__v4di)__A, (__v8di)_mm512_setzero_si512(), __M); } typedef enum { _MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02, _MM_PERM_AAAD = 0x03, _MM_PERM_AABA = 0x04, _MM_PERM_AABB = 0x05, _MM_PERM_AABC = 0x06, _MM_PERM_AABD 
= 0x07, _MM_PERM_AACA = 0x08, _MM_PERM_AACB = 0x09, _MM_PERM_AACC = 0x0A, _MM_PERM_AACD = 0x0B, _MM_PERM_AADA = 0x0C, _MM_PERM_AADB = 0x0D, _MM_PERM_AADC = 0x0E, _MM_PERM_AADD = 0x0F, _MM_PERM_ABAA = 0x10, _MM_PERM_ABAB = 0x11, _MM_PERM_ABAC = 0x12, _MM_PERM_ABAD = 0x13, _MM_PERM_ABBA = 0x14, _MM_PERM_ABBB = 0x15, _MM_PERM_ABBC = 0x16, _MM_PERM_ABBD = 0x17, _MM_PERM_ABCA = 0x18, _MM_PERM_ABCB = 0x19, _MM_PERM_ABCC = 0x1A, _MM_PERM_ABCD = 0x1B, _MM_PERM_ABDA = 0x1C, _MM_PERM_ABDB = 0x1D, _MM_PERM_ABDC = 0x1E, _MM_PERM_ABDD = 0x1F, _MM_PERM_ACAA = 0x20, _MM_PERM_ACAB = 0x21, _MM_PERM_ACAC = 0x22, _MM_PERM_ACAD = 0x23, _MM_PERM_ACBA = 0x24, _MM_PERM_ACBB = 0x25, _MM_PERM_ACBC = 0x26, _MM_PERM_ACBD = 0x27, _MM_PERM_ACCA = 0x28, _MM_PERM_ACCB = 0x29, _MM_PERM_ACCC = 0x2A, _MM_PERM_ACCD = 0x2B, _MM_PERM_ACDA = 0x2C, _MM_PERM_ACDB = 0x2D, _MM_PERM_ACDC = 0x2E, _MM_PERM_ACDD = 0x2F, _MM_PERM_ADAA = 0x30, _MM_PERM_ADAB = 0x31, _MM_PERM_ADAC = 0x32, _MM_PERM_ADAD = 0x33, _MM_PERM_ADBA = 0x34, _MM_PERM_ADBB = 0x35, _MM_PERM_ADBC = 0x36, _MM_PERM_ADBD = 0x37, _MM_PERM_ADCA = 0x38, _MM_PERM_ADCB = 0x39, _MM_PERM_ADCC = 0x3A, _MM_PERM_ADCD = 0x3B, _MM_PERM_ADDA = 0x3C, _MM_PERM_ADDB = 0x3D, _MM_PERM_ADDC = 0x3E, _MM_PERM_ADDD = 0x3F, _MM_PERM_BAAA = 0x40, _MM_PERM_BAAB = 0x41, _MM_PERM_BAAC = 0x42, _MM_PERM_BAAD = 0x43, _MM_PERM_BABA = 0x44, _MM_PERM_BABB = 0x45, _MM_PERM_BABC = 0x46, _MM_PERM_BABD = 0x47, _MM_PERM_BACA = 0x48, _MM_PERM_BACB = 0x49, _MM_PERM_BACC = 0x4A, _MM_PERM_BACD = 0x4B, _MM_PERM_BADA = 0x4C, _MM_PERM_BADB = 0x4D, _MM_PERM_BADC = 0x4E, _MM_PERM_BADD = 0x4F, _MM_PERM_BBAA = 0x50, _MM_PERM_BBAB = 0x51, _MM_PERM_BBAC = 0x52, _MM_PERM_BBAD = 0x53, _MM_PERM_BBBA = 0x54, _MM_PERM_BBBB = 0x55, _MM_PERM_BBBC = 0x56, _MM_PERM_BBBD = 0x57, _MM_PERM_BBCA = 0x58, _MM_PERM_BBCB = 0x59, _MM_PERM_BBCC = 0x5A, _MM_PERM_BBCD = 0x5B, _MM_PERM_BBDA = 0x5C, _MM_PERM_BBDB = 0x5D, _MM_PERM_BBDC = 0x5E, _MM_PERM_BBDD = 0x5F, _MM_PERM_BCAA = 0x60, _MM_PERM_BCAB = 0x61, _MM_PERM_BCAC = 0x62, _MM_PERM_BCAD = 0x63, _MM_PERM_BCBA = 0x64, _MM_PERM_BCBB = 0x65, _MM_PERM_BCBC = 0x66, _MM_PERM_BCBD = 0x67, _MM_PERM_BCCA = 0x68, _MM_PERM_BCCB = 0x69, _MM_PERM_BCCC = 0x6A, _MM_PERM_BCCD = 0x6B, _MM_PERM_BCDA = 0x6C, _MM_PERM_BCDB = 0x6D, _MM_PERM_BCDC = 0x6E, _MM_PERM_BCDD = 0x6F, _MM_PERM_BDAA = 0x70, _MM_PERM_BDAB = 0x71, _MM_PERM_BDAC = 0x72, _MM_PERM_BDAD = 0x73, _MM_PERM_BDBA = 0x74, _MM_PERM_BDBB = 0x75, _MM_PERM_BDBC = 0x76, _MM_PERM_BDBD = 0x77, _MM_PERM_BDCA = 0x78, _MM_PERM_BDCB = 0x79, _MM_PERM_BDCC = 0x7A, _MM_PERM_BDCD = 0x7B, _MM_PERM_BDDA = 0x7C, _MM_PERM_BDDB = 0x7D, _MM_PERM_BDDC = 0x7E, _MM_PERM_BDDD = 0x7F, _MM_PERM_CAAA = 0x80, _MM_PERM_CAAB = 0x81, _MM_PERM_CAAC = 0x82, _MM_PERM_CAAD = 0x83, _MM_PERM_CABA = 0x84, _MM_PERM_CABB = 0x85, _MM_PERM_CABC = 0x86, _MM_PERM_CABD = 0x87, _MM_PERM_CACA = 0x88, _MM_PERM_CACB = 0x89, _MM_PERM_CACC = 0x8A, _MM_PERM_CACD = 0x8B, _MM_PERM_CADA = 0x8C, _MM_PERM_CADB = 0x8D, _MM_PERM_CADC = 0x8E, _MM_PERM_CADD = 0x8F, _MM_PERM_CBAA = 0x90, _MM_PERM_CBAB = 0x91, _MM_PERM_CBAC = 0x92, _MM_PERM_CBAD = 0x93, _MM_PERM_CBBA = 0x94, _MM_PERM_CBBB = 0x95, _MM_PERM_CBBC = 0x96, _MM_PERM_CBBD = 0x97, _MM_PERM_CBCA = 0x98, _MM_PERM_CBCB = 0x99, _MM_PERM_CBCC = 0x9A, _MM_PERM_CBCD = 0x9B, _MM_PERM_CBDA = 0x9C, _MM_PERM_CBDB = 0x9D, _MM_PERM_CBDC = 0x9E, _MM_PERM_CBDD = 0x9F, _MM_PERM_CCAA = 0xA0, _MM_PERM_CCAB = 0xA1, _MM_PERM_CCAC = 0xA2, _MM_PERM_CCAD = 0xA3, _MM_PERM_CCBA = 0xA4, _MM_PERM_CCBB = 0xA5, _MM_PERM_CCBC = 0xA6, _MM_PERM_CCBD = 0xA7, _MM_PERM_CCCA = 0xA8, 
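/* In this _MM_PERM encoding the letters A..D select source dwords
   0..3 within each 128-bit lane; the first letter occupies imm8 bits
   7:6 and the last bits 1:0, so _MM_PERM_DCBA (0xE4) is the identity
   for _mm512_shuffle_epi32(). */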
_MM_PERM_CCCB = 0xA9, _MM_PERM_CCCC = 0xAA, _MM_PERM_CCCD = 0xAB, _MM_PERM_CCDA = 0xAC, _MM_PERM_CCDB = 0xAD, _MM_PERM_CCDC = 0xAE, _MM_PERM_CCDD = 0xAF, _MM_PERM_CDAA = 0xB0, _MM_PERM_CDAB = 0xB1, _MM_PERM_CDAC = 0xB2, _MM_PERM_CDAD = 0xB3, _MM_PERM_CDBA = 0xB4, _MM_PERM_CDBB = 0xB5, _MM_PERM_CDBC = 0xB6, _MM_PERM_CDBD = 0xB7, _MM_PERM_CDCA = 0xB8, _MM_PERM_CDCB = 0xB9, _MM_PERM_CDCC = 0xBA, _MM_PERM_CDCD = 0xBB, _MM_PERM_CDDA = 0xBC, _MM_PERM_CDDB = 0xBD, _MM_PERM_CDDC = 0xBE, _MM_PERM_CDDD = 0xBF, _MM_PERM_DAAA = 0xC0, _MM_PERM_DAAB = 0xC1, _MM_PERM_DAAC = 0xC2, _MM_PERM_DAAD = 0xC3, _MM_PERM_DABA = 0xC4, _MM_PERM_DABB = 0xC5, _MM_PERM_DABC = 0xC6, _MM_PERM_DABD = 0xC7, _MM_PERM_DACA = 0xC8, _MM_PERM_DACB = 0xC9, _MM_PERM_DACC = 0xCA, _MM_PERM_DACD = 0xCB, _MM_PERM_DADA = 0xCC, _MM_PERM_DADB = 0xCD, _MM_PERM_DADC = 0xCE, _MM_PERM_DADD = 0xCF, _MM_PERM_DBAA = 0xD0, _MM_PERM_DBAB = 0xD1, _MM_PERM_DBAC = 0xD2, _MM_PERM_DBAD = 0xD3, _MM_PERM_DBBA = 0xD4, _MM_PERM_DBBB = 0xD5, _MM_PERM_DBBC = 0xD6, _MM_PERM_DBBD = 0xD7, _MM_PERM_DBCA = 0xD8, _MM_PERM_DBCB = 0xD9, _MM_PERM_DBCC = 0xDA, _MM_PERM_DBCD = 0xDB, _MM_PERM_DBDA = 0xDC, _MM_PERM_DBDB = 0xDD, _MM_PERM_DBDC = 0xDE, _MM_PERM_DBDD = 0xDF, _MM_PERM_DCAA = 0xE0, _MM_PERM_DCAB = 0xE1, _MM_PERM_DCAC = 0xE2, _MM_PERM_DCAD = 0xE3, _MM_PERM_DCBA = 0xE4, _MM_PERM_DCBB = 0xE5, _MM_PERM_DCBC = 0xE6, _MM_PERM_DCBD = 0xE7, _MM_PERM_DCCA = 0xE8, _MM_PERM_DCCB = 0xE9, _MM_PERM_DCCC = 0xEA, _MM_PERM_DCCD = 0xEB, _MM_PERM_DCDA = 0xEC, _MM_PERM_DCDB = 0xED, _MM_PERM_DCDC = 0xEE, _MM_PERM_DCDD = 0xEF, _MM_PERM_DDAA = 0xF0, _MM_PERM_DDAB = 0xF1, _MM_PERM_DDAC = 0xF2, _MM_PERM_DDAD = 0xF3, _MM_PERM_DDBA = 0xF4, _MM_PERM_DDBB = 0xF5, _MM_PERM_DDBC = 0xF6, _MM_PERM_DDBD = 0xF7, _MM_PERM_DDCA = 0xF8, _MM_PERM_DDCB = 0xF9, _MM_PERM_DDCC = 0xFA, _MM_PERM_DDCD = 0xFB, _MM_PERM_DDDA = 0xFC, _MM_PERM_DDDB = 0xFD, _MM_PERM_DDDC = 0xFE, _MM_PERM_DDDD = 0xFF } _MM_PERM_ENUM; #ifdef __OPTIMIZE__ __funline __m512i _mm512_shuffle_epi32(__m512i __A, _MM_PERM_ENUM __mask) { return (__m512i)__builtin_ia32_pshufd512_mask( (__v16si)__A, __mask, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_shuffle_epi32(__m512i __W, __mmask16 __U, __m512i __A, _MM_PERM_ENUM __mask) { return (__m512i)__builtin_ia32_pshufd512_mask((__v16si)__A, __mask, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_shuffle_epi32(__mmask16 __U, __m512i __A, _MM_PERM_ENUM __mask) { return (__m512i)__builtin_ia32_pshufd512_mask( (__v16si)__A, __mask, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline __m512i _mm512_shuffle_i64x2(__m512i __A, __m512i __B, const int __imm) { return (__m512i)__builtin_ia32_shuf_i64x2_mask( (__v8di)__A, (__v8di)__B, __imm, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_shuffle_i64x2(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B, const int __imm) { return (__m512i)__builtin_ia32_shuf_i64x2_mask( (__v8di)__A, (__v8di)__B, __imm, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_shuffle_i64x2(__mmask8 __U, __m512i __A, __m512i __B, const int __imm) { return (__m512i)__builtin_ia32_shuf_i64x2_mask( (__v8di)__A, (__v8di)__B, __imm, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } __funline __m512i _mm512_shuffle_i32x4(__m512i __A, __m512i __B, const int __imm) { return (__m512i)__builtin_ia32_shuf_i32x4_mask( (__v16si)__A, (__v16si)__B, __imm, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_shuffle_i32x4(__m512i __W, __mmask16 __U, __m512i 
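/* The shuffle_i32x4/i64x2/f32x4/f64x2 forms move whole 128-bit
   chunks: the low two result chunks are picked from A and the high
   two from B, two imm8 bits each; e.g. _mm512_shuffle_i32x4(a, a,
   0x4e) swaps the 256-bit halves of a hypothetical a. */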
__A, __m512i __B, const int __imm) { return (__m512i)__builtin_ia32_shuf_i32x4_mask( (__v16si)__A, (__v16si)__B, __imm, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_shuffle_i32x4(__mmask16 __U, __m512i __A, __m512i __B, const int __imm) { return (__m512i)__builtin_ia32_shuf_i32x4_mask( (__v16si)__A, (__v16si)__B, __imm, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline __m512d _mm512_shuffle_f64x2(__m512d __A, __m512d __B, const int __imm) { return (__m512d)__builtin_ia32_shuf_f64x2_mask( (__v8df)__A, (__v8df)__B, __imm, (__v8df)_mm512_undefined_pd(), (__mmask8)-1); } __funline __m512d _mm512_mask_shuffle_f64x2(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B, const int __imm) { return (__m512d)__builtin_ia32_shuf_f64x2_mask( (__v8df)__A, (__v8df)__B, __imm, (__v8df)__W, (__mmask8)__U); } __funline __m512d _mm512_maskz_shuffle_f64x2(__mmask8 __U, __m512d __A, __m512d __B, const int __imm) { return (__m512d)__builtin_ia32_shuf_f64x2_mask( (__v8df)__A, (__v8df)__B, __imm, (__v8df)_mm512_setzero_pd(), (__mmask8)__U); } __funline __m512 _mm512_shuffle_f32x4(__m512 __A, __m512 __B, const int __imm) { return (__m512)__builtin_ia32_shuf_f32x4_mask( (__v16sf)__A, (__v16sf)__B, __imm, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1); } __funline __m512 _mm512_mask_shuffle_f32x4(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B, const int __imm) { return (__m512)__builtin_ia32_shuf_f32x4_mask( (__v16sf)__A, (__v16sf)__B, __imm, (__v16sf)__W, (__mmask16)__U); } __funline __m512 _mm512_maskz_shuffle_f32x4(__mmask16 __U, __m512 __A, __m512 __B, const int __imm) { return (__m512)__builtin_ia32_shuf_f32x4_mask( (__v16sf)__A, (__v16sf)__B, __imm, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U); } #else #define _mm512_shuffle_epi32(X, C) \ ((__m512i)__builtin_ia32_pshufd512_mask( \ (__v16si)(__m512i)(X), (int)(C), \ (__v16si)(__m512i)_mm512_undefined_epi32(), (__mmask16)-1)) #define _mm512_mask_shuffle_epi32(W, U, X, C) \ ((__m512i)__builtin_ia32_pshufd512_mask( \ (__v16si)(__m512i)(X), (int)(C), (__v16si)(__m512i)(W), (__mmask16)(U))) #define _mm512_maskz_shuffle_epi32(U, X, C) \ ((__m512i)__builtin_ia32_pshufd512_mask( \ (__v16si)(__m512i)(X), (int)(C), \ (__v16si)(__m512i)_mm512_setzero_si512(), (__mmask16)(U))) #define _mm512_shuffle_i64x2(X, Y, C) \ ((__m512i)__builtin_ia32_shuf_i64x2_mask( \ (__v8di)(__m512i)(X), (__v8di)(__m512i)(Y), (int)(C), \ (__v8di)(__m512i)_mm512_undefined_epi32(), (__mmask8)-1)) #define _mm512_mask_shuffle_i64x2(W, U, X, Y, C) \ ((__m512i)__builtin_ia32_shuf_i64x2_mask( \ (__v8di)(__m512i)(X), (__v8di)(__m512i)(Y), (int)(C), \ (__v8di)(__m512i)(W), (__mmask8)(U))) #define _mm512_maskz_shuffle_i64x2(U, X, Y, C) \ ((__m512i)__builtin_ia32_shuf_i64x2_mask( \ (__v8di)(__m512i)(X), (__v8di)(__m512i)(Y), (int)(C), \ (__v8di)(__m512i)_mm512_setzero_si512(), (__mmask8)(U))) #define _mm512_shuffle_i32x4(X, Y, C) \ ((__m512i)__builtin_ia32_shuf_i32x4_mask( \ (__v16si)(__m512i)(X), (__v16si)(__m512i)(Y), (int)(C), \ (__v16si)(__m512i)_mm512_undefined_epi32(), (__mmask16)-1)) #define _mm512_mask_shuffle_i32x4(W, U, X, Y, C) \ ((__m512i)__builtin_ia32_shuf_i32x4_mask( \ (__v16si)(__m512i)(X), (__v16si)(__m512i)(Y), (int)(C), \ (__v16si)(__m512i)(W), (__mmask16)(U))) #define _mm512_maskz_shuffle_i32x4(U, X, Y, C) \ ((__m512i)__builtin_ia32_shuf_i32x4_mask( \ (__v16si)(__m512i)(X), (__v16si)(__m512i)(Y), (int)(C), \ (__v16si)(__m512i)_mm512_setzero_si512(), (__mmask16)(U))) #define _mm512_shuffle_f64x2(X, Y, C) \ ((__m512d)__builtin_ia32_shuf_f64x2_mask( \ 
(__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (int)(C), \ (__v8df)(__m512d)_mm512_undefined_pd(), (__mmask8)-1)) #define _mm512_mask_shuffle_f64x2(W, U, X, Y, C) \ ((__m512d)__builtin_ia32_shuf_f64x2_mask( \ (__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (int)(C), \ (__v8df)(__m512d)(W), (__mmask8)(U))) #define _mm512_maskz_shuffle_f64x2(U, X, Y, C) \ ((__m512d)__builtin_ia32_shuf_f64x2_mask( \ (__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (int)(C), \ (__v8df)(__m512d)_mm512_setzero_pd(), (__mmask8)(U))) #define _mm512_shuffle_f32x4(X, Y, C) \ ((__m512)__builtin_ia32_shuf_f32x4_mask( \ (__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (int)(C), \ (__v16sf)(__m512)_mm512_undefined_ps(), (__mmask16)-1)) #define _mm512_mask_shuffle_f32x4(W, U, X, Y, C) \ ((__m512)__builtin_ia32_shuf_f32x4_mask( \ (__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (int)(C), \ (__v16sf)(__m512)(W), (__mmask16)(U))) #define _mm512_maskz_shuffle_f32x4(U, X, Y, C) \ ((__m512)__builtin_ia32_shuf_f32x4_mask( \ (__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (int)(C), \ (__v16sf)(__m512)_mm512_setzero_ps(), (__mmask16)(U))) #endif __funline __m512i _mm512_rolv_epi32(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_prolvd512_mask( (__v16si)__A, (__v16si)__B, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_rolv_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_prolvd512_mask((__v16si)__A, (__v16si)__B, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_rolv_epi32(__mmask16 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_prolvd512_mask((__v16si)__A, (__v16si)__B, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline __m512i _mm512_rorv_epi32(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_prorvd512_mask( (__v16si)__A, (__v16si)__B, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1); } __funline __m512i _mm512_mask_rorv_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_prorvd512_mask((__v16si)__A, (__v16si)__B, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_rorv_epi32(__mmask16 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_prorvd512_mask((__v16si)__A, (__v16si)__B, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline __m512i _mm512_rolv_epi64(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_prolvq512_mask( (__v8di)__A, (__v8di)__B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_rolv_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_prolvq512_mask((__v8di)__A, (__v8di)__B, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_rolv_epi64(__mmask8 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_prolvq512_mask( (__v8di)__A, (__v8di)__B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } __funline __m512i _mm512_rorv_epi64(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_prorvq512_mask( (__v8di)__A, (__v8di)__B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1); } __funline __m512i _mm512_mask_rorv_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_prorvq512_mask((__v8di)__A, (__v8di)__B, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_maskz_rorv_epi64(__mmask8 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_prorvq512_mask( (__v8di)__A, (__v8di)__B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U); } #ifdef __OPTIMIZE__ __funline __m256i 
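/* cvtt_* always truncates toward zero, so its int argument can only
   suppress exceptions (_MM_FROUND_NO_EXC), whereas cvt_* honors a
   full _MM_FROUND_* rounding mode. */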
_mm512_cvtt_roundpd_epi32(__m512d __A, const int __R) { return (__m256i)__builtin_ia32_cvttpd2dq512_mask( (__v8df)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1, __R); } __funline __m256i _mm512_mask_cvtt_roundpd_epi32(__m256i __W, __mmask8 __U, __m512d __A, const int __R) { return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)__A, (__v8si)__W, (__mmask8)__U, __R); } __funline __m256i _mm512_maskz_cvtt_roundpd_epi32(__mmask8 __U, __m512d __A, const int __R) { return (__m256i)__builtin_ia32_cvttpd2dq512_mask( (__v8df)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U, __R); } __funline __m256i _mm512_cvtt_roundpd_epu32(__m512d __A, const int __R) { return (__m256i)__builtin_ia32_cvttpd2udq512_mask( (__v8df)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1, __R); } __funline __m256i _mm512_mask_cvtt_roundpd_epu32(__m256i __W, __mmask8 __U, __m512d __A, const int __R) { return (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)__A, (__v8si)__W, (__mmask8)__U, __R); } __funline __m256i _mm512_maskz_cvtt_roundpd_epu32(__mmask8 __U, __m512d __A, const int __R) { return (__m256i)__builtin_ia32_cvttpd2udq512_mask( (__v8df)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U, __R); } #else #define _mm512_cvtt_roundpd_epi32(A, B) \ ((__m256i)__builtin_ia32_cvttpd2dq512_mask( \ A, (__v8si)_mm256_undefined_si256(), -1, B)) #define _mm512_mask_cvtt_roundpd_epi32(W, U, A, B) \ ((__m256i)__builtin_ia32_cvttpd2dq512_mask(A, (__v8si)(W), U, B)) #define _mm512_maskz_cvtt_roundpd_epi32(U, A, B) \ ((__m256i)__builtin_ia32_cvttpd2dq512_mask( \ A, (__v8si)_mm256_setzero_si256(), U, B)) #define _mm512_cvtt_roundpd_epu32(A, B) \ ((__m256i)__builtin_ia32_cvttpd2udq512_mask( \ A, (__v8si)_mm256_undefined_si256(), -1, B)) #define _mm512_mask_cvtt_roundpd_epu32(W, U, A, B) \ ((__m256i)__builtin_ia32_cvttpd2udq512_mask(A, (__v8si)(W), U, B)) #define _mm512_maskz_cvtt_roundpd_epu32(U, A, B) \ ((__m256i)__builtin_ia32_cvttpd2udq512_mask( \ A, (__v8si)_mm256_setzero_si256(), U, B)) #endif #ifdef __OPTIMIZE__ __funline __m256i _mm512_cvt_roundpd_epi32(__m512d __A, const int __R) { return (__m256i)__builtin_ia32_cvtpd2dq512_mask( (__v8df)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1, __R); } __funline __m256i _mm512_mask_cvt_roundpd_epi32(__m256i __W, __mmask8 __U, __m512d __A, const int __R) { return (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)__A, (__v8si)__W, (__mmask8)__U, __R); } __funline __m256i _mm512_maskz_cvt_roundpd_epi32(__mmask8 __U, __m512d __A, const int __R) { return (__m256i)__builtin_ia32_cvtpd2dq512_mask( (__v8df)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U, __R); } __funline __m256i _mm512_cvt_roundpd_epu32(__m512d __A, const int __R) { return (__m256i)__builtin_ia32_cvtpd2udq512_mask( (__v8df)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1, __R); } __funline __m256i _mm512_mask_cvt_roundpd_epu32(__m256i __W, __mmask8 __U, __m512d __A, const int __R) { return (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)__A, (__v8si)__W, (__mmask8)__U, __R); } __funline __m256i _mm512_maskz_cvt_roundpd_epu32(__mmask8 __U, __m512d __A, const int __R) { return (__m256i)__builtin_ia32_cvtpd2udq512_mask( (__v8df)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U, __R); } #else #define _mm512_cvt_roundpd_epi32(A, B) \ ((__m256i)__builtin_ia32_cvtpd2dq512_mask( \ A, (__v8si)_mm256_undefined_si256(), -1, B)) #define _mm512_mask_cvt_roundpd_epi32(W, U, A, B) \ ((__m256i)__builtin_ia32_cvtpd2dq512_mask(A, (__v8si)(W), U, B)) #define _mm512_maskz_cvt_roundpd_epi32(U, A, B) \ 
((__m256i)__builtin_ia32_cvtpd2dq512_mask(A, (__v8si)_mm256_setzero_si256(), \ U, B)) #define _mm512_cvt_roundpd_epu32(A, B) \ ((__m256i)__builtin_ia32_cvtpd2udq512_mask( \ A, (__v8si)_mm256_undefined_si256(), -1, B)) #define _mm512_mask_cvt_roundpd_epu32(W, U, A, B) \ ((__m256i)__builtin_ia32_cvtpd2udq512_mask(A, (__v8si)(W), U, B)) #define _mm512_maskz_cvt_roundpd_epu32(U, A, B) \ ((__m256i)__builtin_ia32_cvtpd2udq512_mask( \ A, (__v8si)_mm256_setzero_si256(), U, B)) #endif #ifdef __OPTIMIZE__ __funline __m512i _mm512_cvtt_roundps_epi32(__m512 __A, const int __R) { return (__m512i)__builtin_ia32_cvttps2dq512_mask( (__v16sf)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1, __R); } __funline __m512i _mm512_mask_cvtt_roundps_epi32(__m512i __W, __mmask16 __U, __m512 __A, const int __R) { return (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)__A, (__v16si)__W, (__mmask16)__U, __R); } __funline __m512i _mm512_maskz_cvtt_roundps_epi32(__mmask16 __U, __m512 __A, const int __R) { return (__m512i)__builtin_ia32_cvttps2dq512_mask( (__v16sf)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U, __R); } __funline __m512i _mm512_cvtt_roundps_epu32(__m512 __A, const int __R) { return (__m512i)__builtin_ia32_cvttps2udq512_mask( (__v16sf)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1, __R); } __funline __m512i _mm512_mask_cvtt_roundps_epu32(__m512i __W, __mmask16 __U, __m512 __A, const int __R) { return (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)__A, (__v16si)__W, (__mmask16)__U, __R); } __funline __m512i _mm512_maskz_cvtt_roundps_epu32(__mmask16 __U, __m512 __A, const int __R) { return (__m512i)__builtin_ia32_cvttps2udq512_mask( (__v16sf)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U, __R); } #else #define _mm512_cvtt_roundps_epi32(A, B) \ ((__m512i)__builtin_ia32_cvttps2dq512_mask( \ A, (__v16si)_mm512_undefined_epi32(), -1, B)) #define _mm512_mask_cvtt_roundps_epi32(W, U, A, B) \ ((__m512i)__builtin_ia32_cvttps2dq512_mask(A, (__v16si)(W), U, B)) #define _mm512_maskz_cvtt_roundps_epi32(U, A, B) \ ((__m512i)__builtin_ia32_cvttps2dq512_mask( \ A, (__v16si)_mm512_setzero_si512(), U, B)) #define _mm512_cvtt_roundps_epu32(A, B) \ ((__m512i)__builtin_ia32_cvttps2udq512_mask( \ A, (__v16si)_mm512_undefined_epi32(), -1, B)) #define _mm512_mask_cvtt_roundps_epu32(W, U, A, B) \ ((__m512i)__builtin_ia32_cvttps2udq512_mask(A, (__v16si)(W), U, B)) #define _mm512_maskz_cvtt_roundps_epu32(U, A, B) \ ((__m512i)__builtin_ia32_cvttps2udq512_mask( \ A, (__v16si)_mm512_setzero_si512(), U, B)) #endif #ifdef __OPTIMIZE__ __funline __m512i _mm512_cvt_roundps_epi32(__m512 __A, const int __R) { return (__m512i)__builtin_ia32_cvtps2dq512_mask( (__v16sf)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1, __R); } __funline __m512i _mm512_mask_cvt_roundps_epi32(__m512i __W, __mmask16 __U, __m512 __A, const int __R) { return (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)__A, (__v16si)__W, (__mmask16)__U, __R); } __funline __m512i _mm512_maskz_cvt_roundps_epi32(__mmask16 __U, __m512 __A, const int __R) { return (__m512i)__builtin_ia32_cvtps2dq512_mask( (__v16sf)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U, __R); } __funline __m512i _mm512_cvt_roundps_epu32(__m512 __A, const int __R) { return (__m512i)__builtin_ia32_cvtps2udq512_mask( (__v16sf)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1, __R); } __funline __m512i _mm512_mask_cvt_roundps_epu32(__m512i __W, __mmask16 __U, __m512 __A, const int __R) { return (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)__A, (__v16si)__W, 
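/* Inputs that do not fit the destination make the epu32 converts
   return 0xFFFFFFFF and the epi32 converts 0x80000000, raising the
   invalid exception unless it is suppressed. */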
(__mmask16)__U, __R); } __funline __m512i _mm512_maskz_cvt_roundps_epu32(__mmask16 __U, __m512 __A, const int __R) { return (__m512i)__builtin_ia32_cvtps2udq512_mask( (__v16sf)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U, __R); } #else #define _mm512_cvt_roundps_epi32(A, B) \ ((__m512i)__builtin_ia32_cvtps2dq512_mask( \ A, (__v16si)_mm512_undefined_epi32(), -1, B)) #define _mm512_mask_cvt_roundps_epi32(W, U, A, B) \ ((__m512i)__builtin_ia32_cvtps2dq512_mask(A, (__v16si)(W), U, B)) #define _mm512_maskz_cvt_roundps_epi32(U, A, B) \ ((__m512i)__builtin_ia32_cvtps2dq512_mask( \ A, (__v16si)_mm512_setzero_si512(), U, B)) #define _mm512_cvt_roundps_epu32(A, B) \ ((__m512i)__builtin_ia32_cvtps2udq512_mask( \ A, (__v16si)_mm512_undefined_epi32(), -1, B)) #define _mm512_mask_cvt_roundps_epu32(W, U, A, B) \ ((__m512i)__builtin_ia32_cvtps2udq512_mask(A, (__v16si)(W), U, B)) #define _mm512_maskz_cvt_roundps_epu32(U, A, B) \ ((__m512i)__builtin_ia32_cvtps2udq512_mask( \ A, (__v16si)_mm512_setzero_si512(), U, B)) #endif __funline __m128d _mm_cvtu32_sd(__m128d __A, unsigned __B) { return (__m128d)__builtin_ia32_cvtusi2sd32((__v2df)__A, __B); } #ifdef __x86_64__ #ifdef __OPTIMIZE__ __funline __m128d _mm_cvt_roundu64_sd(__m128d __A, unsigned long long __B, const int __R) { return (__m128d)__builtin_ia32_cvtusi2sd64((__v2df)__A, __B, __R); } __funline __m128d _mm_cvt_roundi64_sd(__m128d __A, long long __B, const int __R) { return (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)__A, __B, __R); } __funline __m128d _mm_cvt_roundsi64_sd(__m128d __A, long long __B, const int __R) { return (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)__A, __B, __R); } #else #define _mm_cvt_roundu64_sd(A, B, C) \ (__m128d) __builtin_ia32_cvtusi2sd64(A, B, C) #define _mm_cvt_roundi64_sd(A, B, C) \ (__m128d) __builtin_ia32_cvtsi2sd64(A, B, C) #define _mm_cvt_roundsi64_sd(A, B, C) \ (__m128d) __builtin_ia32_cvtsi2sd64(A, B, C) #endif #endif #ifdef __OPTIMIZE__ __funline __m128 _mm_cvt_roundu32_ss(__m128 __A, unsigned __B, const int __R) { return (__m128)__builtin_ia32_cvtusi2ss32((__v4sf)__A, __B, __R); } __funline __m128 _mm_cvt_roundsi32_ss(__m128 __A, int __B, const int __R) { return (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)__A, __B, __R); } __funline __m128 _mm_cvt_roundi32_ss(__m128 __A, int __B, const int __R) { return (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)__A, __B, __R); } #else #define _mm_cvt_roundu32_ss(A, B, C) \ (__m128) __builtin_ia32_cvtusi2ss32(A, B, C) #define _mm_cvt_roundi32_ss(A, B, C) (__m128) __builtin_ia32_cvtsi2ss32(A, B, C) #define _mm_cvt_roundsi32_ss(A, B, C) \ (__m128) __builtin_ia32_cvtsi2ss32(A, B, C) #endif #ifdef __x86_64__ #ifdef __OPTIMIZE__ __funline __m128 _mm_cvt_roundu64_ss(__m128 __A, unsigned long long __B, const int __R) { return (__m128)__builtin_ia32_cvtusi2ss64((__v4sf)__A, __B, __R); } __funline __m128 _mm_cvt_roundsi64_ss(__m128 __A, long long __B, const int __R) { return (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)__A, __B, __R); } __funline __m128 _mm_cvt_roundi64_ss(__m128 __A, long long __B, const int __R) { return (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)__A, __B, __R); } #else #define _mm_cvt_roundu64_ss(A, B, C) \ (__m128) __builtin_ia32_cvtusi2ss64(A, B, C) #define _mm_cvt_roundi64_ss(A, B, C) (__m128) __builtin_ia32_cvtsi2ss64(A, B, C) #define _mm_cvt_roundsi64_ss(A, B, C) \ (__m128) __builtin_ia32_cvtsi2ss64(A, B, C) #endif #endif __funline __m128i _mm512_cvtepi32_epi8(__m512i __A) { return (__m128i)__builtin_ia32_pmovdb512_mask( (__v16si)__A, (__v16qi)_mm_undefined_si128(), 
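/* The narrowing converts come in three flavors: cvtepi* truncates
   each element, cvtsepi* saturates as signed, and cvtusepi*
   saturates as unsigned; e.g. for an int32 lane holding 300,
   _mm512_cvtepi32_epi8() keeps 0x2C while _mm512_cvtsepi32_epi8()
   clamps it to 127. */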
(__mmask16)-1); } __funline void _mm512_mask_cvtepi32_storeu_epi8(void *__P, __mmask16 __M, __m512i __A) { __builtin_ia32_pmovdb512mem_mask((__v16qi *)__P, (__v16si)__A, __M); } __funline __m128i _mm512_mask_cvtepi32_epi8(__m128i __O, __mmask16 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovdb512_mask((__v16si)__A, (__v16qi)__O, __M); } __funline __m128i _mm512_maskz_cvtepi32_epi8(__mmask16 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovdb512_mask( (__v16si)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm512_cvtsepi32_epi8(__m512i __A) { return (__m128i)__builtin_ia32_pmovsdb512_mask( (__v16si)__A, (__v16qi)_mm_undefined_si128(), (__mmask16)-1); } __funline void _mm512_mask_cvtsepi32_storeu_epi8(void *__P, __mmask16 __M, __m512i __A) { __builtin_ia32_pmovsdb512mem_mask((__v16qi *)__P, (__v16si)__A, __M); } __funline __m128i _mm512_mask_cvtsepi32_epi8(__m128i __O, __mmask16 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovsdb512_mask((__v16si)__A, (__v16qi)__O, __M); } __funline __m128i _mm512_maskz_cvtsepi32_epi8(__mmask16 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovsdb512_mask( (__v16si)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm512_cvtusepi32_epi8(__m512i __A) { return (__m128i)__builtin_ia32_pmovusdb512_mask( (__v16si)__A, (__v16qi)_mm_undefined_si128(), (__mmask16)-1); } __funline void _mm512_mask_cvtusepi32_storeu_epi8(void *__P, __mmask16 __M, __m512i __A) { __builtin_ia32_pmovusdb512mem_mask((__v16qi *)__P, (__v16si)__A, __M); } __funline __m128i _mm512_mask_cvtusepi32_epi8(__m128i __O, __mmask16 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovusdb512_mask((__v16si)__A, (__v16qi)__O, __M); } __funline __m128i _mm512_maskz_cvtusepi32_epi8(__mmask16 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovusdb512_mask( (__v16si)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m256i _mm512_cvtepi32_epi16(__m512i __A) { return (__m256i)__builtin_ia32_pmovdw512_mask( (__v16si)__A, (__v16hi)_mm256_undefined_si256(), (__mmask16)-1); } __funline void _mm512_mask_cvtepi32_storeu_epi16(void *__P, __mmask16 __M, __m512i __A) { __builtin_ia32_pmovdw512mem_mask((__v16hi *)__P, (__v16si)__A, __M); } __funline __m256i _mm512_mask_cvtepi32_epi16(__m256i __O, __mmask16 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovdw512_mask((__v16si)__A, (__v16hi)__O, __M); } __funline __m256i _mm512_maskz_cvtepi32_epi16(__mmask16 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovdw512_mask( (__v16si)__A, (__v16hi)_mm256_setzero_si256(), __M); } __funline __m256i _mm512_cvtsepi32_epi16(__m512i __A) { return (__m256i)__builtin_ia32_pmovsdw512_mask( (__v16si)__A, (__v16hi)_mm256_undefined_si256(), (__mmask16)-1); } __funline void _mm512_mask_cvtsepi32_storeu_epi16(void *__P, __mmask16 __M, __m512i __A) { __builtin_ia32_pmovsdw512mem_mask((__v16hi *)__P, (__v16si)__A, __M); } __funline __m256i _mm512_mask_cvtsepi32_epi16(__m256i __O, __mmask16 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovsdw512_mask((__v16si)__A, (__v16hi)__O, __M); } __funline __m256i _mm512_maskz_cvtsepi32_epi16(__mmask16 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovsdw512_mask( (__v16si)__A, (__v16hi)_mm256_setzero_si256(), __M); } __funline __m256i _mm512_cvtusepi32_epi16(__m512i __A) { return (__m256i)__builtin_ia32_pmovusdw512_mask( (__v16si)__A, (__v16hi)_mm256_undefined_si256(), (__mmask16)-1); } __funline void _mm512_mask_cvtusepi32_storeu_epi16(void *__P, __mmask16 __M, __m512i __A) { __builtin_ia32_pmovusdw512mem_mask((__v16hi 
*)__P, (__v16si)__A, __M); } __funline __m256i _mm512_mask_cvtusepi32_epi16(__m256i __O, __mmask16 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovusdw512_mask((__v16si)__A, (__v16hi)__O, __M); } __funline __m256i _mm512_maskz_cvtusepi32_epi16(__mmask16 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovusdw512_mask( (__v16si)__A, (__v16hi)_mm256_setzero_si256(), __M); } __funline __m256i _mm512_cvtepi64_epi32(__m512i __A) { return (__m256i)__builtin_ia32_pmovqd512_mask( (__v8di)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1); } __funline void _mm512_mask_cvtepi64_storeu_epi32(void *__P, __mmask8 __M, __m512i __A) { __builtin_ia32_pmovqd512mem_mask((__v8si *)__P, (__v8di)__A, __M); } __funline __m256i _mm512_mask_cvtepi64_epi32(__m256i __O, __mmask8 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovqd512_mask((__v8di)__A, (__v8si)__O, __M); } __funline __m256i _mm512_maskz_cvtepi64_epi32(__mmask8 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovqd512_mask( (__v8di)__A, (__v8si)_mm256_setzero_si256(), __M); } __funline __m256i _mm512_cvtsepi64_epi32(__m512i __A) { return (__m256i)__builtin_ia32_pmovsqd512_mask( (__v8di)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1); } __funline void _mm512_mask_cvtsepi64_storeu_epi32(void *__P, __mmask8 __M, __m512i __A) { __builtin_ia32_pmovsqd512mem_mask((__v8si *)__P, (__v8di)__A, __M); } __funline __m256i _mm512_mask_cvtsepi64_epi32(__m256i __O, __mmask8 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovsqd512_mask((__v8di)__A, (__v8si)__O, __M); } __funline __m256i _mm512_maskz_cvtsepi64_epi32(__mmask8 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovsqd512_mask( (__v8di)__A, (__v8si)_mm256_setzero_si256(), __M); } __funline __m256i _mm512_cvtusepi64_epi32(__m512i __A) { return (__m256i)__builtin_ia32_pmovusqd512_mask( (__v8di)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1); } __funline void _mm512_mask_cvtusepi64_storeu_epi32(void *__P, __mmask8 __M, __m512i __A) { __builtin_ia32_pmovusqd512mem_mask((__v8si *)__P, (__v8di)__A, __M); } __funline __m256i _mm512_mask_cvtusepi64_epi32(__m256i __O, __mmask8 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovusqd512_mask((__v8di)__A, (__v8si)__O, __M); } __funline __m256i _mm512_maskz_cvtusepi64_epi32(__mmask8 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovusqd512_mask( (__v8di)__A, (__v8si)_mm256_setzero_si256(), __M); } __funline __m128i _mm512_cvtepi64_epi16(__m512i __A) { return (__m128i)__builtin_ia32_pmovqw512_mask( (__v8di)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm512_mask_cvtepi64_storeu_epi16(void *__P, __mmask8 __M, __m512i __A) { __builtin_ia32_pmovqw512mem_mask((__v8hi *)__P, (__v8di)__A, __M); } __funline __m128i _mm512_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovqw512_mask((__v8di)__A, (__v8hi)__O, __M); } __funline __m128i _mm512_maskz_cvtepi64_epi16(__mmask8 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovqw512_mask( (__v8di)__A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm512_cvtsepi64_epi16(__m512i __A) { return (__m128i)__builtin_ia32_pmovsqw512_mask( (__v8di)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm512_mask_cvtsepi64_storeu_epi16(void *__P, __mmask8 __M, __m512i __A) { __builtin_ia32_pmovsqw512mem_mask((__v8hi *)__P, (__v8di)__A, __M); } __funline __m128i _mm512_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovsqw512_mask((__v8di)__A, (__v8hi)__O, 
__M); } __funline __m128i _mm512_maskz_cvtsepi64_epi16(__mmask8 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovsqw512_mask( (__v8di)__A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm512_cvtusepi64_epi16(__m512i __A) { return (__m128i)__builtin_ia32_pmovusqw512_mask( (__v8di)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm512_mask_cvtusepi64_storeu_epi16(void *__P, __mmask8 __M, __m512i __A) { __builtin_ia32_pmovusqw512mem_mask((__v8hi *)__P, (__v8di)__A, __M); } __funline __m128i _mm512_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovusqw512_mask((__v8di)__A, (__v8hi)__O, __M); } __funline __m128i _mm512_maskz_cvtusepi64_epi16(__mmask8 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovusqw512_mask( (__v8di)__A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm512_cvtepi64_epi8(__m512i __A) { return (__m128i)__builtin_ia32_pmovqb512_mask( (__v8di)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm512_mask_cvtepi64_storeu_epi8(void *__P, __mmask8 __M, __m512i __A) { __builtin_ia32_pmovqb512mem_mask((__v16qi *)__P, (__v8di)__A, __M); } __funline __m128i _mm512_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovqb512_mask((__v8di)__A, (__v16qi)__O, __M); } __funline __m128i _mm512_maskz_cvtepi64_epi8(__mmask8 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovqb512_mask( (__v8di)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm512_cvtsepi64_epi8(__m512i __A) { return (__m128i)__builtin_ia32_pmovsqb512_mask( (__v8di)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm512_mask_cvtsepi64_storeu_epi8(void *__P, __mmask8 __M, __m512i __A) { __builtin_ia32_pmovsqb512mem_mask((__v16qi *)__P, (__v8di)__A, __M); } __funline __m128i _mm512_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovsqb512_mask((__v8di)__A, (__v16qi)__O, __M); } __funline __m128i _mm512_maskz_cvtsepi64_epi8(__mmask8 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovsqb512_mask( (__v8di)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm512_cvtusepi64_epi8(__m512i __A) { return (__m128i)__builtin_ia32_pmovusqb512_mask( (__v8di)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm512_mask_cvtusepi64_storeu_epi8(void *__P, __mmask8 __M, __m512i __A) { __builtin_ia32_pmovusqb512mem_mask((__v16qi *)__P, (__v8di)__A, __M); } __funline __m128i _mm512_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovusqb512_mask((__v8di)__A, (__v16qi)__O, __M); } __funline __m128i _mm512_maskz_cvtusepi64_epi8(__mmask8 __M, __m512i __A) { return (__m128i)__builtin_ia32_pmovusqb512_mask( (__v8di)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m512d _mm512_cvtepi32_pd(__m256i __A) { return (__m512d)__builtin_ia32_cvtdq2pd512_mask( (__v8si)__A, (__v8df)_mm512_undefined_pd(), (__mmask8)-1); } __funline __m512d _mm512_mask_cvtepi32_pd(__m512d __W, __mmask8 __U, __m256i __A) { return (__m512d)__builtin_ia32_cvtdq2pd512_mask((__v8si)__A, (__v8df)__W, (__mmask8)__U); } __funline __m512d _mm512_maskz_cvtepi32_pd(__mmask8 __U, __m256i __A) { return (__m512d)__builtin_ia32_cvtdq2pd512_mask( (__v8si)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U); } __funline __m512d _mm512_cvtepu32_pd(__m256i __A) { return (__m512d)__builtin_ia32_cvtudq2pd512_mask( (__v8si)__A, (__v8df)_mm512_undefined_pd(), (__mmask8)-1); } __funline 
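/* Widening 32-bit integer lanes to double is exact, every int32 and
   uint32 value being representable, which is presumably why these
   converts have no rounding variant. */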
__funline __m512d _mm512_mask_cvtepu32_pd(__m512d __W, __mmask8 __U,
                                          __m256i __A) {
  return (__m512d)__builtin_ia32_cvtudq2pd512_mask((__v8si)__A, (__v8df)__W,
                                                   (__mmask8)__U);
}

__funline __m512d _mm512_maskz_cvtepu32_pd(__mmask8 __U, __m256i __A) {
  return (__m512d)__builtin_ia32_cvtudq2pd512_mask(
      (__v8si)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U);
}

#ifdef __OPTIMIZE__
__funline __m512 _mm512_cvt_roundepi32_ps(__m512i __A, const int __R) {
  return (__m512)__builtin_ia32_cvtdq2ps512_mask(
      (__v16si)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, __R);
}

__funline __m512 _mm512_mask_cvt_roundepi32_ps(__m512 __W, __mmask16 __U,
                                               __m512i __A, const int __R) {
  return (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)__A, (__v16sf)__W,
                                                 (__mmask16)__U, __R);
}

__funline __m512 _mm512_maskz_cvt_roundepi32_ps(__mmask16 __U, __m512i __A,
                                                const int __R) {
  return (__m512)__builtin_ia32_cvtdq2ps512_mask(
      (__v16si)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, __R);
}

__funline __m512 _mm512_cvt_roundepu32_ps(__m512i __A, const int __R) {
  return (__m512)__builtin_ia32_cvtudq2ps512_mask(
      (__v16si)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, __R);
}

__funline __m512 _mm512_mask_cvt_roundepu32_ps(__m512 __W, __mmask16 __U,
                                               __m512i __A, const int __R) {
  return (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)__A, (__v16sf)__W,
                                                  (__mmask16)__U, __R);
}

__funline __m512 _mm512_maskz_cvt_roundepu32_ps(__mmask16 __U, __m512i __A,
                                                const int __R) {
  return (__m512)__builtin_ia32_cvtudq2ps512_mask(
      (__v16si)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, __R);
}
#else
#define _mm512_cvt_roundepi32_ps(A, B)      \
  (__m512) __builtin_ia32_cvtdq2ps512_mask( \
      (__v16si)(A), (__v16sf)_mm512_undefined_ps(), -1, B)
#define _mm512_mask_cvt_roundepi32_ps(W, U, A, B) \
  (__m512) __builtin_ia32_cvtdq2ps512_mask((__v16si)(A), W, U, B)
#define _mm512_maskz_cvt_roundepi32_ps(U, A, B)          \
  (__m512) __builtin_ia32_cvtdq2ps512_mask((__v16si)(A), \
                                           (__v16sf)_mm512_setzero_ps(), U, B)
#define _mm512_cvt_roundepu32_ps(A, B)       \
  (__m512) __builtin_ia32_cvtudq2ps512_mask( \
      (__v16si)(A), (__v16sf)_mm512_undefined_ps(), -1, B)
#define _mm512_mask_cvt_roundepu32_ps(W, U, A, B) \
  (__m512) __builtin_ia32_cvtudq2ps512_mask((__v16si)(A), W, U, B)
#define _mm512_maskz_cvt_roundepu32_ps(U, A, B) \
  (__m512) __builtin_ia32_cvtudq2ps512_mask(    \
      (__v16si)(A), (__v16sf)_mm512_setzero_ps(), U, B)
#endif

#ifdef __OPTIMIZE__
__funline __m256d _mm512_extractf64x4_pd(__m512d __A, const int __imm) {
  return (__m256d)__builtin_ia32_extractf64x4_mask(
      (__v8df)__A, __imm, (__v4df)_mm256_undefined_pd(), (__mmask8)-1);
}

__funline __m256d _mm512_mask_extractf64x4_pd(__m256d __W, __mmask8 __U,
                                              __m512d __A, const int __imm) {
  return (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)__A, __imm,
                                                   (__v4df)__W, (__mmask8)__U);
}

__funline __m256d _mm512_maskz_extractf64x4_pd(__mmask8 __U, __m512d __A,
                                               const int __imm) {
  return (__m256d)__builtin_ia32_extractf64x4_mask(
      (__v8df)__A, __imm, (__v4df)_mm256_setzero_pd(), (__mmask8)__U);
}

__funline __m128 _mm512_extractf32x4_ps(__m512 __A, const int __imm) {
  return (__m128)__builtin_ia32_extractf32x4_mask(
      (__v16sf)__A, __imm, (__v4sf)_mm_undefined_ps(), (__mmask8)-1);
}

__funline __m128 _mm512_mask_extractf32x4_ps(__m128 __W, __mmask8 __U,
                                             __m512 __A, const int __imm) {
  return (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)__A, __imm,
                                                  (__v4sf)__W, (__mmask8)__U);
}
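/*
 * Added note: for the extract intrinsics, the immediate selects which
 * 128-bit (x4) or 256-bit (x4 of 64-bit lanes) chunk of the 512-bit source
 * is returned; e.g. _mm512_extractf32x4_ps(v, 3) yields float lanes 12..15.
 * Illustrative commentary only.
 */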
__funline __m128 _mm512_maskz_extractf32x4_ps(__mmask8 __U, __m512 __A,
                                              const int __imm) {
  return (__m128)__builtin_ia32_extractf32x4_mask(
      (__v16sf)__A, __imm, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
}

__funline __m256i _mm512_extracti64x4_epi64(__m512i __A, const int __imm) {
  return (__m256i)__builtin_ia32_extracti64x4_mask(
      (__v8di)__A, __imm, (__v4di)_mm256_undefined_si256(), (__mmask8)-1);
}

__funline __m256i _mm512_mask_extracti64x4_epi64(__m256i __W, __mmask8 __U,
                                                 __m512i __A, const int __imm) {
  return (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)__A, __imm,
                                                   (__v4di)__W, (__mmask8)__U);
}

__funline __m256i _mm512_maskz_extracti64x4_epi64(__mmask8 __U, __m512i __A,
                                                  const int __imm) {
  return (__m256i)__builtin_ia32_extracti64x4_mask(
      (__v8di)__A, __imm, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m128i _mm512_extracti32x4_epi32(__m512i __A, const int __imm) {
  return (__m128i)__builtin_ia32_extracti32x4_mask(
      (__v16si)__A, __imm, (__v4si)_mm_undefined_si128(), (__mmask8)-1);
}

__funline __m128i _mm512_mask_extracti32x4_epi32(__m128i __W, __mmask8 __U,
                                                 __m512i __A, const int __imm) {
  return (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)__A, __imm,
                                                   (__v4si)__W, (__mmask8)__U);
}

__funline __m128i _mm512_maskz_extracti32x4_epi32(__mmask8 __U, __m512i __A,
                                                  const int __imm) {
  return (__m128i)__builtin_ia32_extracti32x4_mask(
      (__v16si)__A, __imm, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
}
#else
#define _mm512_extractf64x4_pd(X, C)                                          \
  ((__m256d)__builtin_ia32_extractf64x4_mask(                                 \
      (__v8df)(__m512d)(X), (int)(C), (__v4df)(__m256d)_mm256_undefined_pd(), \
      (__mmask8)-1))
#define _mm512_mask_extractf64x4_pd(W, U, X, C) \
  ((__m256d)__builtin_ia32_extractf64x4_mask(   \
      (__v8df)(__m512d)(X), (int)(C), (__v4df)(__m256d)(W), (__mmask8)(U)))
#define _mm512_maskz_extractf64x4_pd(U, X, C)                               \
  ((__m256d)__builtin_ia32_extractf64x4_mask(                               \
      (__v8df)(__m512d)(X), (int)(C), (__v4df)(__m256d)_mm256_setzero_pd(), \
      (__mmask8)(U)))
#define _mm512_extractf32x4_ps(X, C)                                       \
  ((__m128)__builtin_ia32_extractf32x4_mask(                               \
      (__v16sf)(__m512)(X), (int)(C), (__v4sf)(__m128)_mm_undefined_ps(),  \
      (__mmask8)-1))
#define _mm512_mask_extractf32x4_ps(W, U, X, C) \
  ((__m128)__builtin_ia32_extractf32x4_mask(    \
      (__v16sf)(__m512)(X), (int)(C), (__v4sf)(__m128)(W), (__mmask8)(U)))
#define _mm512_maskz_extractf32x4_ps(U, X, C)                               \
  ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(X), (int)(C), \
                                            (__v4sf)(__m128)_mm_setzero_ps(), \
                                            (__mmask8)(U)))
#define _mm512_extracti64x4_epi64(X, C)         \
  ((__m256i)__builtin_ia32_extracti64x4_mask(   \
      (__v8di)(__m512i)(X), (int)(C),           \
      (__v4di)(__m256i)_mm256_undefined_si256(), (__mmask8)-1))
#define _mm512_mask_extracti64x4_epi64(W, U, X, C) \
  ((__m256i)__builtin_ia32_extracti64x4_mask(      \
      (__v8di)(__m512i)(X), (int)(C), (__v4di)(__m256i)(W), (__mmask8)(U)))
#define _mm512_maskz_extracti64x4_epi64(U, X, C)                               \
  ((__m256i)__builtin_ia32_extracti64x4_mask(                                  \
      (__v8di)(__m512i)(X), (int)(C), (__v4di)(__m256i)_mm256_setzero_si256(), \
      (__mmask8)(U)))
#define _mm512_extracti32x4_epi32(X, C)                                        \
  ((__m128i)__builtin_ia32_extracti32x4_mask(                                  \
      (__v16si)(__m512i)(X), (int)(C), (__v4si)(__m128i)_mm_undefined_si128(), \
      (__mmask8)-1))
#define _mm512_mask_extracti32x4_epi32(W, U, X, C) \
  ((__m128i)__builtin_ia32_extracti32x4_mask(      \
      (__v16si)(__m512i)(X), (int)(C), (__v4si)(__m128i)(W), (__mmask8)(U)))
#define _mm512_maskz_extracti32x4_epi32(U, X, C)                             \
  ((__m128i)__builtin_ia32_extracti32x4_mask(                                \
      (__v16si)(__m512i)(X), (int)(C), (__v4si)(__m128i)_mm_setzero_si128(), \
      (__mmask8)(U)))
#endif
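/*
 * Added note: the __OPTIMIZE__ splits in this file exist because these
 * immediates must be compile-time constants; without optimization the
 * inline functions may not constant-fold into the instruction's imm8 field,
 * so macro forms are substituted. Commentary, not from upstream.
 */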
#ifdef __OPTIMIZE__
__funline __m512i _mm512_inserti32x4(__m512i __A, __m128i __B,
                                     const int __imm) {
  return (__m512i)__builtin_ia32_inserti32x4_mask((__v16si)__A, (__v4si)__B,
                                                  __imm, (__v16si)__A, -1);
}

__funline __m512 _mm512_insertf32x4(__m512 __A, __m128 __B, const int __imm) {
  return (__m512)__builtin_ia32_insertf32x4_mask((__v16sf)__A, (__v4sf)__B,
                                                 __imm, (__v16sf)__A, -1);
}

__funline __m512i _mm512_inserti64x4(__m512i __A, __m256i __B,
                                     const int __imm) {
  return (__m512i)__builtin_ia32_inserti64x4_mask(
      (__v8di)__A, (__v4di)__B, __imm, (__v8di)_mm512_undefined_epi32(),
      (__mmask8)-1);
}

__funline __m512i _mm512_mask_inserti64x4(__m512i __W, __mmask8 __U,
                                          __m512i __A, __m256i __B,
                                          const int __imm) {
  return (__m512i)__builtin_ia32_inserti64x4_mask(
      (__v8di)__A, (__v4di)__B, __imm, (__v8di)__W, (__mmask8)__U);
}

__funline __m512i _mm512_maskz_inserti64x4(__mmask8 __U, __m512i __A,
                                           __m256i __B, const int __imm) {
  return (__m512i)__builtin_ia32_inserti64x4_mask(
      (__v8di)__A, (__v4di)__B, __imm, (__v8di)_mm512_setzero_si512(),
      (__mmask8)__U);
}

__funline __m512d _mm512_insertf64x4(__m512d __A, __m256d __B,
                                     const int __imm) {
  return (__m512d)__builtin_ia32_insertf64x4_mask(
      (__v8df)__A, (__v4df)__B, __imm, (__v8df)_mm512_undefined_pd(),
      (__mmask8)-1);
}

__funline __m512d _mm512_mask_insertf64x4(__m512d __W, __mmask8 __U,
                                          __m512d __A, __m256d __B,
                                          const int __imm) {
  return (__m512d)__builtin_ia32_insertf64x4_mask(
      (__v8df)__A, (__v4df)__B, __imm, (__v8df)__W, (__mmask8)__U);
}

__funline __m512d _mm512_maskz_insertf64x4(__mmask8 __U, __m512d __A,
                                           __m256d __B, const int __imm) {
  return (__m512d)__builtin_ia32_insertf64x4_mask(
      (__v8df)__A, (__v4df)__B, __imm, (__v8df)_mm512_setzero_pd(),
      (__mmask8)__U);
}
#else
#define _mm512_insertf32x4(X, Y, C)                      \
  ((__m512)__builtin_ia32_insertf32x4_mask(              \
      (__v16sf)(__m512)(X), (__v4sf)(__m128)(Y), (int)(C), \
      (__v16sf)(__m512)(X), (__mmask16)(-1)))
#define _mm512_inserti32x4(X, Y, C)                         \
  ((__m512i)__builtin_ia32_inserti32x4_mask(                \
      (__v16si)(__m512i)(X), (__v4si)(__m128i)(Y), (int)(C), \
      (__v16si)(__m512i)(X), (__mmask16)(-1)))
#define _mm512_insertf64x4(X, Y, C)                        \
  ((__m512d)__builtin_ia32_insertf64x4_mask(               \
      (__v8df)(__m512d)(X), (__v4df)(__m256d)(Y), (int)(C), \
      (__v8df)(__m512d)_mm512_undefined_pd(), (__mmask8)-1))
#define _mm512_mask_insertf64x4(W, U, X, Y, C)             \
  ((__m512d)__builtin_ia32_insertf64x4_mask(               \
      (__v8df)(__m512d)(X), (__v4df)(__m256d)(Y), (int)(C), \
      (__v8df)(__m512d)(W), (__mmask8)(U)))
#define _mm512_maskz_insertf64x4(U, X, Y, C)               \
  ((__m512d)__builtin_ia32_insertf64x4_mask(               \
      (__v8df)(__m512d)(X), (__v4df)(__m256d)(Y), (int)(C), \
      (__v8df)(__m512d)_mm512_setzero_pd(), (__mmask8)(U)))
#define _mm512_inserti64x4(X, Y, C)                        \
  ((__m512i)__builtin_ia32_inserti64x4_mask(               \
      (__v8di)(__m512i)(X), (__v4di)(__m256i)(Y), (int)(C), \
      (__v8di)(__m512i)_mm512_undefined_epi32(), (__mmask8)-1))
#define _mm512_mask_inserti64x4(W, U, X, Y, C)             \
  ((__m512i)__builtin_ia32_inserti64x4_mask(               \
      (__v8di)(__m512i)(X), (__v4di)(__m256i)(Y), (int)(C), \
      (__v8di)(__m512i)(W), (__mmask8)(U)))
#define _mm512_maskz_inserti64x4(U, X, Y, C)               \
  ((__m512i)__builtin_ia32_inserti64x4_mask(               \
      (__v8di)(__m512i)(X), (__v4di)(__m256i)(Y), (int)(C), \
      (__v8di)(__m512i)_mm512_setzero_si512(), (__mmask8)(U)))
#endif

__funline __m512d _mm512_loadu_pd(void const *__P) {
  return *(__m512d_u *)__P;
}

__funline __m512d _mm512_mask_loadu_pd(__m512d __W, __mmask8 __U,
                                       void const *__P) {
  return (__m512d)__builtin_ia32_loadupd512_mask((const double *)__P,
                                                 (__v8df)__W, (__mmask8)__U);
}
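/*
 * Added usage sketch (illustrative, not upstream): masked loads read from
 * memory only where the mask bit is set, e.g.
 *
 *   double buf[8];
 *   __m512d d = _mm512_mask_loadu_pd(_mm512_setzero_pd(), 0x0F, buf);
 *   // lanes 0..3 come from buf, lanes 4..7 keep the 0.0 from __W
 */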
__funline __m512d _mm512_maskz_loadu_pd(__mmask8 __U, void const *__P) {
  return (__m512d)__builtin_ia32_loadupd512_mask(
      (const double *)__P, (__v8df)_mm512_setzero_pd(), (__mmask8)__U);
}

__funline void _mm512_storeu_pd(void *__P, __m512d __A) {
  *(__m512d_u *)__P = __A;
}

__funline void _mm512_mask_storeu_pd(void *__P, __mmask8 __U, __m512d __A) {
  __builtin_ia32_storeupd512_mask((double *)__P, (__v8df)__A, (__mmask8)__U);
}

__funline __m512 _mm512_loadu_ps(void const *__P) {
  return *(__m512_u *)__P;
}

__funline __m512 _mm512_mask_loadu_ps(__m512 __W, __mmask16 __U,
                                      void const *__P) {
  return (__m512)__builtin_ia32_loadups512_mask((const float *)__P,
                                                (__v16sf)__W, (__mmask16)__U);
}

__funline __m512 _mm512_maskz_loadu_ps(__mmask16 __U, void const *__P) {
  return (__m512)__builtin_ia32_loadups512_mask(
      (const float *)__P, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U);
}

__funline void _mm512_storeu_ps(void *__P, __m512 __A) {
  *(__m512_u *)__P = __A;
}

__funline void _mm512_mask_storeu_ps(void *__P, __mmask16 __U, __m512 __A) {
  __builtin_ia32_storeups512_mask((float *)__P, (__v16sf)__A, (__mmask16)__U);
}

__funline __m128 _mm_mask_load_ss(__m128 __W, __mmask8 __U, const float *__P) {
  return (__m128)__builtin_ia32_loadss_mask(__P, (__v4sf)__W, __U);
}

__funline __m128 _mm_maskz_load_ss(__mmask8 __U, const float *__P) {
  return (__m128)__builtin_ia32_loadss_mask(__P, (__v4sf)_mm_setzero_ps(),
                                            __U);
}

__funline __m128d _mm_mask_load_sd(__m128d __W, __mmask8 __U,
                                   const double *__P) {
  return (__m128d)__builtin_ia32_loadsd_mask(__P, (__v2df)__W, __U);
}

__funline __m128d _mm_maskz_load_sd(__mmask8 __U, const double *__P) {
  return (__m128d)__builtin_ia32_loadsd_mask(__P, (__v2df)_mm_setzero_pd(),
                                             __U);
}

__funline __m128 _mm_mask_move_ss(__m128 __W, __mmask8 __U, __m128 __A,
                                  __m128 __B) {
  return (__m128)__builtin_ia32_movess_mask((__v4sf)__A, (__v4sf)__B,
                                            (__v4sf)__W, __U);
}

__funline __m128 _mm_maskz_move_ss(__mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128)__builtin_ia32_movess_mask((__v4sf)__A, (__v4sf)__B,
                                            (__v4sf)_mm_setzero_ps(), __U);
}

__funline __m128d _mm_mask_move_sd(__m128d __W, __mmask8 __U, __m128d __A,
                                   __m128d __B) {
  return (__m128d)__builtin_ia32_movesd_mask((__v2df)__A, (__v2df)__B,
                                             (__v2df)__W, __U);
}

__funline __m128d _mm_maskz_move_sd(__mmask8 __U, __m128d __A, __m128d __B) {
  return (__m128d)__builtin_ia32_movesd_mask((__v2df)__A, (__v2df)__B,
                                             (__v2df)_mm_setzero_pd(), __U);
}

__funline void _mm_mask_store_ss(float *__P, __mmask8 __U, __m128 __A) {
  __builtin_ia32_storess_mask(__P, (__v4sf)__A, (__mmask8)__U);
}

__funline void _mm_mask_store_sd(double *__P, __mmask8 __U, __m128d __A) {
  __builtin_ia32_storesd_mask(__P, (__v2df)__A, (__mmask8)__U);
}

__funline __m512i _mm512_mask_loadu_epi64(__m512i __W, __mmask8 __U,
                                          void const *__P) {
  return (__m512i)__builtin_ia32_loaddqudi512_mask((const long long *)__P,
                                                   (__v8di)__W, (__mmask8)__U);
}

__funline __m512i _mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P) {
  return (__m512i)__builtin_ia32_loaddqudi512_mask(
      (const long long *)__P, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}

__funline void _mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A) {
  __builtin_ia32_storedqudi512_mask((long long *)__P, (__v8di)__A,
                                    (__mmask8)__U);
}

__funline __m512i _mm512_loadu_si512(void const *__P) {
  return *(__m512i_u *)__P;
}

__funline __m512i _mm512_mask_loadu_epi32(__m512i __W, __mmask16 __U,
                                          void const *__P) {
  return (__m512i)__builtin_ia32_loaddqusi512_mask(
      (const int *)__P, (__v16si)__W, (__mmask16)__U);
}
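/*
 * Added note: every mask/maskz pair in this file follows one convention:
 * the _mask_ form merges inactive lanes from the __W operand, while the
 * _maskz_ form zeroes them. The scalar _mm_mask_load_ss/sd variants above
 * apply the same rule to element 0 only. Commentary, not from upstream.
 */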
__funline __m512i _mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P) {
  return (__m512i)__builtin_ia32_loaddqusi512_mask(
      (const int *)__P, (__v16si)_mm512_setzero_si512(), (__mmask16)__U);
}

__funline void _mm512_storeu_si512(void *__P, __m512i __A) {
  *(__m512i_u *)__P = __A;
}

__funline void _mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A) {
  __builtin_ia32_storedqusi512_mask((int *)__P, (__v16si)__A, (__mmask16)__U);
}

__funline __m512d _mm512_permutevar_pd(__m512d __A, __m512i __C) {
  return (__m512d)__builtin_ia32_vpermilvarpd512_mask(
      (__v8df)__A, (__v8di)__C, (__v8df)_mm512_undefined_pd(), (__mmask8)-1);
}

__funline __m512d _mm512_mask_permutevar_pd(__m512d __W, __mmask8 __U,
                                            __m512d __A, __m512i __C) {
  return (__m512d)__builtin_ia32_vpermilvarpd512_mask(
      (__v8df)__A, (__v8di)__C, (__v8df)__W, (__mmask8)__U);
}

__funline __m512d _mm512_maskz_permutevar_pd(__mmask8 __U, __m512d __A,
                                             __m512i __C) {
  return (__m512d)__builtin_ia32_vpermilvarpd512_mask(
      (__v8df)__A, (__v8di)__C, (__v8df)_mm512_setzero_pd(), (__mmask8)__U);
}

__funline __m512 _mm512_permutevar_ps(__m512 __A, __m512i __C) {
  return (__m512)__builtin_ia32_vpermilvarps512_mask(
      (__v16sf)__A, (__v16si)__C, (__v16sf)_mm512_undefined_ps(),
      (__mmask16)-1);
}

__funline __m512 _mm512_mask_permutevar_ps(__m512 __W, __mmask16 __U,
                                           __m512 __A, __m512i __C) {
  return (__m512)__builtin_ia32_vpermilvarps512_mask(
      (__v16sf)__A, (__v16si)__C, (__v16sf)__W, (__mmask16)__U);
}

__funline __m512 _mm512_maskz_permutevar_ps(__mmask16 __U, __m512 __A,
                                            __m512i __C) {
  return (__m512)__builtin_ia32_vpermilvarps512_mask(
      (__v16sf)__A, (__v16si)__C, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U);
}

__funline __m512i _mm512_permutex2var_epi64(__m512i __A, __m512i __I,
                                            __m512i __B) {
  return (__m512i)__builtin_ia32_vpermt2varq512_mask((__v8di)__I /* idx */,
                                                     (__v8di)__A, (__v8di)__B,
                                                     (__mmask8)-1);
}

__funline __m512i _mm512_mask_permutex2var_epi64(__m512i __A, __mmask8 __U,
                                                 __m512i __I, __m512i __B) {
  return (__m512i)__builtin_ia32_vpermt2varq512_mask((__v8di)__I /* idx */,
                                                     (__v8di)__A, (__v8di)__B,
                                                     (__mmask8)__U);
}

__funline __m512i _mm512_mask2_permutex2var_epi64(__m512i __A, __m512i __I,
                                                  __mmask8 __U, __m512i __B) {
  return (__m512i)__builtin_ia32_vpermi2varq512_mask((__v8di)__A,
                                                     (__v8di)__I /* idx */,
                                                     (__v8di)__B,
                                                     (__mmask8)__U);
}

__funline __m512i _mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A,
                                                  __m512i __I, __m512i __B) {
  return (__m512i)__builtin_ia32_vpermt2varq512_maskz((__v8di)__I /* idx */,
                                                      (__v8di)__A, (__v8di)__B,
                                                      (__mmask8)__U);
}

__funline __m512i _mm512_permutex2var_epi32(__m512i __A, __m512i __I,
                                            __m512i __B) {
  return (__m512i)__builtin_ia32_vpermt2vard512_mask((__v16si)__I /* idx */,
                                                     (__v16si)__A,
                                                     (__v16si)__B,
                                                     (__mmask16)-1);
}

__funline __m512i _mm512_mask_permutex2var_epi32(__m512i __A, __mmask16 __U,
                                                 __m512i __I, __m512i __B) {
  return (__m512i)__builtin_ia32_vpermt2vard512_mask((__v16si)__I /* idx */,
                                                     (__v16si)__A,
                                                     (__v16si)__B,
                                                     (__mmask16)__U);
}

__funline __m512i _mm512_mask2_permutex2var_epi32(__m512i __A, __m512i __I,
                                                  __mmask16 __U, __m512i __B) {
  return (__m512i)__builtin_ia32_vpermi2vard512_mask((__v16si)__A,
                                                     (__v16si)__I /* idx */,
                                                     (__v16si)__B,
                                                     (__mmask16)__U);
}

__funline __m512i _mm512_maskz_permutex2var_epi32(__mmask16 __U, __m512i __A,
                                                  __m512i __I, __m512i __B) {
  return (__m512i)__builtin_ia32_vpermt2vard512_maskz(
      (__v16si)__I /* idx */, (__v16si)__A, (__v16si)__B, (__mmask16)__U);
}

__funline __m512d _mm512_permutex2var_pd(__m512d __A, __m512i __I,
                                         __m512d __B) {
  return (__m512d)__builtin_ia32_vpermt2varpd512_mask((__v8di)__I /* idx */,
                                                      (__v8df)__A, (__v8df)__B,
                                                      (__mmask8)-1);
}
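/*
 * Added usage sketch: permutex2var treats __A and __B as one 2N-element
 * table indexed by __I; for epi64, bit 3 of each index selects between __A
 * (0) and __B (1). E.g. an index vector {0,8,1,9,...} interleaves the two
 * sources element by element. Illustrative commentary, not from upstream.
 */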
__funline __m512d _mm512_mask_permutex2var_pd(__m512d __A, __mmask8 __U,
                                              __m512i __I, __m512d __B) {
  return (__m512d)__builtin_ia32_vpermt2varpd512_mask((__v8di)__I /* idx */,
                                                      (__v8df)__A, (__v8df)__B,
                                                      (__mmask8)__U);
}

__funline __m512d _mm512_mask2_permutex2var_pd(__m512d __A, __m512i __I,
                                               __mmask8 __U, __m512d __B) {
  return (__m512d)__builtin_ia32_vpermi2varpd512_mask((__v8df)__A,
                                                      (__v8di)__I /* idx */,
                                                      (__v8df)__B,
                                                      (__mmask8)__U);
}

__funline __m512d _mm512_maskz_permutex2var_pd(__mmask8 __U, __m512d __A,
                                               __m512i __I, __m512d __B) {
  return (__m512d)__builtin_ia32_vpermt2varpd512_maskz((__v8di)__I /* idx */,
                                                       (__v8df)__A,
                                                       (__v8df)__B,
                                                       (__mmask8)__U);
}

__funline __m512 _mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B) {
  return (__m512)__builtin_ia32_vpermt2varps512_mask((__v16si)__I /* idx */,
                                                     (__v16sf)__A,
                                                     (__v16sf)__B,
                                                     (__mmask16)-1);
}

__funline __m512 _mm512_mask_permutex2var_ps(__m512 __A, __mmask16 __U,
                                             __m512i __I, __m512 __B) {
  return (__m512)__builtin_ia32_vpermt2varps512_mask((__v16si)__I /* idx */,
                                                     (__v16sf)__A,
                                                     (__v16sf)__B,
                                                     (__mmask16)__U);
}

__funline __m512 _mm512_mask2_permutex2var_ps(__m512 __A, __m512i __I,
                                              __mmask16 __U, __m512 __B) {
  return (__m512)__builtin_ia32_vpermi2varps512_mask((__v16sf)__A,
                                                     (__v16si)__I /* idx */,
                                                     (__v16sf)__B,
                                                     (__mmask16)__U);
}

__funline __m512 _mm512_maskz_permutex2var_ps(__mmask16 __U, __m512 __A,
                                              __m512i __I, __m512 __B) {
  return (__m512)__builtin_ia32_vpermt2varps512_maskz(
      (__v16si)__I /* idx */, (__v16sf)__A, (__v16sf)__B, (__mmask16)__U);
}

#ifdef __OPTIMIZE__
__funline __m512d _mm512_permute_pd(__m512d __X, const int __C) {
  return (__m512d)__builtin_ia32_vpermilpd512_mask(
      (__v8df)__X, __C, (__v8df)_mm512_undefined_pd(), (__mmask8)-1);
}

__funline __m512d _mm512_mask_permute_pd(__m512d __W, __mmask8 __U, __m512d __X,
                                         const int __C) {
  return (__m512d)__builtin_ia32_vpermilpd512_mask((__v8df)__X, __C,
                                                   (__v8df)__W, (__mmask8)__U);
}

__funline __m512d _mm512_maskz_permute_pd(__mmask8 __U, __m512d __X,
                                          const int __C) {
  return (__m512d)__builtin_ia32_vpermilpd512_mask(
      (__v8df)__X, __C, (__v8df)_mm512_setzero_pd(), (__mmask8)__U);
}

__funline __m512 _mm512_permute_ps(__m512 __X, const int __C) {
  return (__m512)__builtin_ia32_vpermilps512_mask(
      (__v16sf)__X, __C, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1);
}

__funline __m512 _mm512_mask_permute_ps(__m512 __W, __mmask16 __U, __m512 __X,
                                        const int __C) {
  return (__m512)__builtin_ia32_vpermilps512_mask((__v16sf)__X, __C,
                                                  (__v16sf)__W, (__mmask16)__U);
}

__funline __m512 _mm512_maskz_permute_ps(__mmask16 __U, __m512 __X,
                                         const int __C) {
  return (__m512)__builtin_ia32_vpermilps512_mask(
      (__v16sf)__X, __C, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U);
}
#else
#define _mm512_permute_pd(X, C)                                               \
  ((__m512d)__builtin_ia32_vpermilpd512_mask(                                 \
      (__v8df)(__m512d)(X), (int)(C), (__v8df)(__m512d)_mm512_undefined_pd(), \
      (__mmask8)(-1)))
#define _mm512_mask_permute_pd(W, U, X, C)    \
  ((__m512d)__builtin_ia32_vpermilpd512_mask( \
      (__v8df)(__m512d)(X), (int)(C), (__v8df)(__m512d)(W), (__mmask8)(U)))
#define _mm512_maskz_permute_pd(U, X, C)                                    \
  ((__m512d)__builtin_ia32_vpermilpd512_mask(                               \
      (__v8df)(__m512d)(X), (int)(C), (__v8df)(__m512d)_mm512_setzero_pd(), \
      (__mmask8)(U)))
#define _mm512_permute_ps(X, C)                                               \
  ((__m512)__builtin_ia32_vpermilps512_mask(                                  \
      (__v16sf)(__m512)(X), (int)(C), (__v16sf)(__m512)_mm512_undefined_ps(), \
      (__mmask16)(-1)))
#define _mm512_mask_permute_ps(W, U, X, C)   \
  ((__m512)__builtin_ia32_vpermilps512_mask( \
      (__v16sf)(__m512)(X), (int)(C), (__v16sf)(__m512)(W), (__mmask16)(U)))
#define _mm512_maskz_permute_ps(U, X, C)                                    \
  ((__m512)__builtin_ia32_vpermilps512_mask(                                \
      (__v16sf)(__m512)(X), (int)(C), (__v16sf)(__m512)_mm512_setzero_ps(), \
      (__mmask16)(U)))
#endif

#ifdef __OPTIMIZE__
__funline __m512i _mm512_permutex_epi64(__m512i __X, const int __I) {
  return (__m512i)__builtin_ia32_permdi512_mask(
      (__v8di)__X, __I, (__v8di)_mm512_undefined_epi32(), (__mmask8)(-1));
}

__funline __m512i _mm512_mask_permutex_epi64(__m512i __W, __mmask8 __M,
                                             __m512i __X, const int __I) {
  return (__m512i)__builtin_ia32_permdi512_mask((__v8di)__X, __I, (__v8di)__W,
                                                (__mmask8)__M);
}

__funline __m512i _mm512_maskz_permutex_epi64(__mmask8 __M, __m512i __X,
                                              const int __I) {
  return (__m512i)__builtin_ia32_permdi512_mask(
      (__v8di)__X, __I, (__v8di)_mm512_setzero_si512(), (__mmask8)__M);
}

__funline __m512d _mm512_permutex_pd(__m512d __X, const int __M) {
  return (__m512d)__builtin_ia32_permdf512_mask(
      (__v8df)__X, __M, (__v8df)_mm512_undefined_pd(), (__mmask8)-1);
}

__funline __m512d _mm512_mask_permutex_pd(__m512d __W, __mmask8 __U,
                                          __m512d __X, const int __M) {
  return (__m512d)__builtin_ia32_permdf512_mask((__v8df)__X, __M, (__v8df)__W,
                                                (__mmask8)__U);
}

__funline __m512d _mm512_maskz_permutex_pd(__mmask8 __U, __m512d __X,
                                           const int __M) {
  return (__m512d)__builtin_ia32_permdf512_mask(
      (__v8df)__X, __M, (__v8df)_mm512_setzero_pd(), (__mmask8)__U);
}
#else
#define _mm512_permutex_pd(X, M)                                              \
  ((__m512d)__builtin_ia32_permdf512_mask(                                    \
      (__v8df)(__m512d)(X), (int)(M), (__v8df)(__m512d)_mm512_undefined_pd(), \
      (__mmask8)-1))
#define _mm512_mask_permutex_pd(W, U, X, M) \
  ((__m512d)__builtin_ia32_permdf512_mask(  \
      (__v8df)(__m512d)(X), (int)(M), (__v8df)(__m512d)(W), (__mmask8)(U)))
#define _mm512_maskz_permutex_pd(U, X, M)                                   \
  ((__m512d)__builtin_ia32_permdf512_mask(                                  \
      (__v8df)(__m512d)(X), (int)(M), (__v8df)(__m512d)_mm512_setzero_pd(), \
      (__mmask8)(U)))
#define _mm512_permutex_epi64(X, I)        \
  ((__m512i)__builtin_ia32_permdi512_mask( \
      (__v8di)(__m512i)(X), (int)(I),      \
      (__v8di)(__m512i)(_mm512_undefined_epi32()), (__mmask8)(-1)))
#define _mm512_maskz_permutex_epi64(M, X, I) \
  ((__m512i)__builtin_ia32_permdi512_mask(   \
      (__v8di)(__m512i)(X), (int)(I),        \
      (__v8di)(__m512i)(_mm512_setzero_si512()), (__mmask8)(M)))
#define _mm512_mask_permutex_epi64(W, M, X, I) \
  ((__m512i)__builtin_ia32_permdi512_mask(     \
      (__v8di)(__m512i)(X), (int)(I), (__v8di)(__m512i)(W), (__mmask8)(M)))
#endif

__funline __m512i _mm512_maskz_permutexvar_epi64(__mmask8 __M, __m512i __X,
                                                 __m512i __Y) {
  return (__m512i)__builtin_ia32_permvardi512_mask(
      (__v8di)__Y, (__v8di)__X, (__v8di)_mm512_setzero_si512(), __M);
}

__funline __m512i _mm512_permutexvar_epi64(__m512i __X, __m512i __Y) {
  return (__m512i)__builtin_ia32_permvardi512_mask(
      (__v8di)__Y, (__v8di)__X, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1);
}

__funline __m512i _mm512_mask_permutexvar_epi64(__m512i __W, __mmask8 __M,
                                                __m512i __X, __m512i __Y) {
  return (__m512i)__builtin_ia32_permvardi512_mask((__v8di)__Y, (__v8di)__X,
                                                   (__v8di)__W, __M);
}

__funline __m512i _mm512_maskz_permutexvar_epi32(__mmask16 __M, __m512i __X,
                                                 __m512i __Y) {
  return (__m512i)__builtin_ia32_permvarsi512_mask(
      (__v16si)__Y, (__v16si)__X, (__v16si)_mm512_setzero_si512(), __M);
}

__funline __m512i _mm512_permutexvar_epi32(__m512i __X, __m512i __Y) {
  return (__m512i)__builtin_ia32_permvarsi512_mask(
      (__v16si)__Y, (__v16si)__X, (__v16si)_mm512_undefined_epi32(),
      (__mmask16)-1);
}
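/*
 * Added note: mind the operand order of permutexvar — the first argument
 * __X is the index vector and the second (__Y) is the data, even though the
 * underlying builtin takes them reversed. A full-register lane reversal,
 * as an illustrative sketch:
 *
 *   __m512i rev = _mm512_permutexvar_epi32(
 *       _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7,
 *                        8, 9, 10, 11, 12, 13, 14, 15), v);
 */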
__funline __m512i _mm512_mask_permutexvar_epi32(__m512i __W, __mmask16 __M,
                                                __m512i __X, __m512i __Y) {
  return (__m512i)__builtin_ia32_permvarsi512_mask((__v16si)__Y, (__v16si)__X,
                                                   (__v16si)__W, __M);
}

__funline __m512d _mm512_permutexvar_pd(__m512i __X, __m512d __Y) {
  return (__m512d)__builtin_ia32_permvardf512_mask(
      (__v8df)__Y, (__v8di)__X, (__v8df)_mm512_undefined_pd(), (__mmask8)-1);
}

__funline __m512d _mm512_mask_permutexvar_pd(__m512d __W, __mmask8 __U,
                                             __m512i __X, __m512d __Y) {
  return (__m512d)__builtin_ia32_permvardf512_mask((__v8df)__Y, (__v8di)__X,
                                                   (__v8df)__W, (__mmask8)__U);
}

__funline __m512d _mm512_maskz_permutexvar_pd(__mmask8 __U, __m512i __X,
                                              __m512d __Y) {
  return (__m512d)__builtin_ia32_permvardf512_mask(
      (__v8df)__Y, (__v8di)__X, (__v8df)_mm512_setzero_pd(), (__mmask8)__U);
}

__funline __m512 _mm512_permutexvar_ps(__m512i __X, __m512 __Y) {
  return (__m512)__builtin_ia32_permvarsf512_mask(
      (__v16sf)__Y, (__v16si)__X, (__v16sf)_mm512_undefined_ps(),
      (__mmask16)-1);
}

__funline __m512 _mm512_mask_permutexvar_ps(__m512 __W, __mmask16 __U,
                                            __m512i __X, __m512 __Y) {
  return (__m512)__builtin_ia32_permvarsf512_mask((__v16sf)__Y, (__v16si)__X,
                                                  (__v16sf)__W, (__mmask16)__U);
}

__funline __m512 _mm512_maskz_permutexvar_ps(__mmask16 __U, __m512i __X,
                                             __m512 __Y) {
  return (__m512)__builtin_ia32_permvarsf512_mask(
      (__v16sf)__Y, (__v16si)__X, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U);
}

#ifdef __OPTIMIZE__
__funline __m512 _mm512_shuffle_ps(__m512 __M, __m512 __V, const int __imm) {
  return (__m512)__builtin_ia32_shufps512_mask(
      (__v16sf)__M, (__v16sf)__V, __imm, (__v16sf)_mm512_undefined_ps(),
      (__mmask16)-1);
}

__funline __m512 _mm512_mask_shuffle_ps(__m512 __W, __mmask16 __U, __m512 __M,
                                        __m512 __V, const int __imm) {
  return (__m512)__builtin_ia32_shufps512_mask(
      (__v16sf)__M, (__v16sf)__V, __imm, (__v16sf)__W, (__mmask16)__U);
}

__funline __m512 _mm512_maskz_shuffle_ps(__mmask16 __U, __m512 __M, __m512 __V,
                                         const int __imm) {
  return (__m512)__builtin_ia32_shufps512_mask(
      (__v16sf)__M, (__v16sf)__V, __imm, (__v16sf)_mm512_setzero_ps(),
      (__mmask16)__U);
}

__funline __m512d _mm512_shuffle_pd(__m512d __M, __m512d __V, const int __imm) {
  return (__m512d)__builtin_ia32_shufpd512_mask((__v8df)__M, (__v8df)__V,
                                                __imm,
                                                (__v8df)_mm512_undefined_pd(),
                                                (__mmask8)-1);
}

__funline __m512d _mm512_mask_shuffle_pd(__m512d __W, __mmask8 __U, __m512d __M,
                                         __m512d __V, const int __imm) {
  return (__m512d)__builtin_ia32_shufpd512_mask((__v8df)__M, (__v8df)__V,
                                                __imm, (__v8df)__W,
                                                (__mmask8)__U);
}

__funline __m512d _mm512_maskz_shuffle_pd(__mmask8 __U, __m512d __M,
                                          __m512d __V, const int __imm) {
  return (__m512d)__builtin_ia32_shufpd512_mask((__v8df)__M, (__v8df)__V,
                                                __imm,
                                                (__v8df)_mm512_setzero_pd(),
                                                (__mmask8)__U);
}

__funline __m512d _mm512_fixupimm_round_pd(__m512d __A, __m512d __B,
                                           __m512i __C, const int __imm,
                                           const int __R) {
  return (__m512d)__builtin_ia32_fixupimmpd512_mask(
      (__v8df)__A, (__v8df)__B, (__v8di)__C, __imm, (__mmask8)-1, __R);
}

__funline __m512d _mm512_mask_fixupimm_round_pd(__m512d __A, __mmask8 __U,
                                                __m512d __B, __m512i __C,
                                                const int __imm,
                                                const int __R) {
  return (__m512d)__builtin_ia32_fixupimmpd512_mask(
      (__v8df)__A, (__v8df)__B, (__v8di)__C, __imm, (__mmask8)__U, __R);
}

__funline __m512d _mm512_maskz_fixupimm_round_pd(__mmask8 __U, __m512d __A,
                                                 __m512d __B, __m512i __C,
                                                 const int __imm,
                                                 const int __R) {
  return (__m512d)__builtin_ia32_fixupimmpd512_maskz(
      (__v8df)__A, (__v8df)__B, (__v8di)__C, __imm, (__mmask8)__U, __R);
}
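/*
 * Added note: fixupimm is a table-driven "fixup" — each element of the
 * integer operand __C supplies 4-bit tokens, indexed by the float class of
 * the input (NaN, zero, ±Inf, ...), that select a replacement value, while
 * __imm controls exception reporting. See the Intel SDM (VFIXUPIMMPD/PS)
 * for the token table; this comment is not from the upstream header.
 */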
__funline __m512 _mm512_fixupimm_round_ps(__m512 __A, __m512 __B, __m512i __C,
                                          const int __imm, const int __R) {
  return (__m512)__builtin_ia32_fixupimmps512_mask(
      (__v16sf)__A, (__v16sf)__B, (__v16si)__C, __imm, (__mmask16)-1, __R);
}

__funline __m512 _mm512_mask_fixupimm_round_ps(__m512 __A, __mmask16 __U,
                                               __m512 __B, __m512i __C,
                                               const int __imm,
                                               const int __R) {
  return (__m512)__builtin_ia32_fixupimmps512_mask(
      (__v16sf)__A, (__v16sf)__B, (__v16si)__C, __imm, (__mmask16)__U, __R);
}

__funline __m512 _mm512_maskz_fixupimm_round_ps(__mmask16 __U, __m512 __A,
                                                __m512 __B, __m512i __C,
                                                const int __imm,
                                                const int __R) {
  return (__m512)__builtin_ia32_fixupimmps512_maskz(
      (__v16sf)__A, (__v16sf)__B, (__v16si)__C, __imm, (__mmask16)__U, __R);
}

__funline __m128d _mm_fixupimm_round_sd(__m128d __A, __m128d __B, __m128i __C,
                                        const int __imm, const int __R) {
  return (__m128d)__builtin_ia32_fixupimmsd_mask(
      (__v2df)__A, (__v2df)__B, (__v2di)__C, __imm, (__mmask8)-1, __R);
}

__funline __m128d _mm_mask_fixupimm_round_sd(__m128d __A, __mmask8 __U,
                                             __m128d __B, __m128i __C,
                                             const int __imm, const int __R) {
  return (__m128d)__builtin_ia32_fixupimmsd_mask(
      (__v2df)__A, (__v2df)__B, (__v2di)__C, __imm, (__mmask8)__U, __R);
}

__funline __m128d _mm_maskz_fixupimm_round_sd(__mmask8 __U, __m128d __A,
                                              __m128d __B, __m128i __C,
                                              const int __imm, const int __R) {
  return (__m128d)__builtin_ia32_fixupimmsd_maskz(
      (__v2df)__A, (__v2df)__B, (__v2di)__C, __imm, (__mmask8)__U, __R);
}

__funline __m128 _mm_fixupimm_round_ss(__m128 __A, __m128 __B, __m128i __C,
                                       const int __imm, const int __R) {
  return (__m128)__builtin_ia32_fixupimmss_mask(
      (__v4sf)__A, (__v4sf)__B, (__v4si)__C, __imm, (__mmask8)-1, __R);
}

__funline __m128 _mm_mask_fixupimm_round_ss(__m128 __A, __mmask8 __U,
                                            __m128 __B, __m128i __C,
                                            const int __imm, const int __R) {
  return (__m128)__builtin_ia32_fixupimmss_mask(
      (__v4sf)__A, (__v4sf)__B, (__v4si)__C, __imm, (__mmask8)__U, __R);
}

__funline __m128 _mm_maskz_fixupimm_round_ss(__mmask8 __U, __m128 __A,
                                             __m128 __B, __m128i __C,
                                             const int __imm, const int __R) {
  return (__m128)__builtin_ia32_fixupimmss_maskz(
      (__v4sf)__A, (__v4sf)__B, (__v4si)__C, __imm, (__mmask8)__U, __R);
}
#else
#define _mm512_shuffle_pd(X, Y, C)                          \
  ((__m512d)__builtin_ia32_shufpd512_mask(                  \
      (__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (int)(C), \
      (__v8df)(__m512d)_mm512_undefined_pd(), (__mmask8)-1))
#define _mm512_mask_shuffle_pd(W, U, X, Y, C)               \
  ((__m512d)__builtin_ia32_shufpd512_mask(                  \
      (__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (int)(C), \
      (__v8df)(__m512d)(W), (__mmask8)(U)))
#define _mm512_maskz_shuffle_pd(U, X, Y, C)                 \
  ((__m512d)__builtin_ia32_shufpd512_mask(                  \
      (__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (int)(C), \
      (__v8df)(__m512d)_mm512_setzero_pd(), (__mmask8)(U)))
#define _mm512_shuffle_ps(X, Y, C)                            \
  ((__m512)__builtin_ia32_shufps512_mask(                     \
      (__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (int)(C),   \
      (__v16sf)(__m512)_mm512_undefined_ps(), (__mmask16)-1))
#define _mm512_mask_shuffle_ps(W, U, X, Y, C)               \
  ((__m512)__builtin_ia32_shufps512_mask(                   \
      (__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (int)(C), \
      (__v16sf)(__m512)(W), (__mmask16)(U)))
#define _mm512_maskz_shuffle_ps(U, X, Y, C)                 \
  ((__m512)__builtin_ia32_shufps512_mask(                   \
      (__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (int)(C), \
      (__v16sf)(__m512)_mm512_setzero_ps(), (__mmask16)(U)))
#define _mm512_fixupimm_round_pd(X, Y, Z, C, R)                        \
  ((__m512d)__builtin_ia32_fixupimmpd512_mask(                         \
      (__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (__v8di)(__m512i)(Z), \
      (int)(C), (__mmask8)(-1), (R)))
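/*
 * Added note: the const int __R / (R) rounding argument of the *_round_*
 * forms takes the usual _MM_FROUND_* values, e.g.
 * (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC), or
 * _MM_FROUND_CUR_DIRECTION to keep the current MXCSR rounding mode.
 */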
#define _mm512_mask_fixupimm_round_pd(X, U, Y, Z, C, R)                 \
  ((__m512d)__builtin_ia32_fixupimmpd512_mask(                          \
      (__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (__v8di)(__m512i)(Z), \
      (int)(C), (__mmask8)(U), (R)))
#define _mm512_maskz_fixupimm_round_pd(U, X, Y, Z, C, R)                \
  ((__m512d)__builtin_ia32_fixupimmpd512_maskz(                         \
      (__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (__v8di)(__m512i)(Z), \
      (int)(C), (__mmask8)(U), (R)))
#define _mm512_fixupimm_round_ps(X, Y, Z, C, R)                           \
  ((__m512)__builtin_ia32_fixupimmps512_mask(                             \
      (__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (__v16si)(__m512i)(Z),  \
      (int)(C), (__mmask16)(-1), (R)))
#define _mm512_mask_fixupimm_round_ps(X, U, Y, Z, C, R)                   \
  ((__m512)__builtin_ia32_fixupimmps512_mask(                             \
      (__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (__v16si)(__m512i)(Z),  \
      (int)(C), (__mmask16)(U), (R)))
#define _mm512_maskz_fixupimm_round_ps(U, X, Y, Z, C, R)                  \
  ((__m512)__builtin_ia32_fixupimmps512_maskz(                            \
      (__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (__v16si)(__m512i)(Z),  \
      (int)(C), (__mmask16)(U), (R)))
#define _mm_fixupimm_round_sd(X, Y, Z, C, R)                            \
  ((__m128d)__builtin_ia32_fixupimmsd_mask(                             \
      (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), \
      (int)(C), (__mmask8)(-1), (R)))
#define _mm_mask_fixupimm_round_sd(X, U, Y, Z, C, R)                    \
  ((__m128d)__builtin_ia32_fixupimmsd_mask(                             \
      (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), \
      (int)(C), (__mmask8)(U), (R)))
#define _mm_maskz_fixupimm_round_sd(U, X, Y, Z, C, R)                   \
  ((__m128d)__builtin_ia32_fixupimmsd_maskz(                            \
      (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), \
      (int)(C), (__mmask8)(U), (R)))
#define _mm_fixupimm_round_ss(X, Y, Z, C, R)                          \
  ((__m128)__builtin_ia32_fixupimmss_mask(                            \
      (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), \
      (int)(C), (__mmask8)(-1), (R)))
#define _mm_mask_fixupimm_round_ss(X, U, Y, Z, C, R)                  \
  ((__m128)__builtin_ia32_fixupimmss_mask(                            \
      (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), \
      (int)(C), (__mmask8)(U), (R)))
#define _mm_maskz_fixupimm_round_ss(U, X, Y, Z, C, R)                 \
  ((__m128)__builtin_ia32_fixupimmss_maskz(                           \
      (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), \
      (int)(C), (__mmask8)(U), (R)))
#endif

__funline __m512 _mm512_movehdup_ps(__m512 __A) {
  return (__m512)__builtin_ia32_movshdup512_mask(
      (__v16sf)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1);
}

__funline __m512 _mm512_mask_movehdup_ps(__m512 __W, __mmask16 __U, __m512 __A) {
  return (__m512)__builtin_ia32_movshdup512_mask((__v16sf)__A, (__v16sf)__W,
                                                 (__mmask16)__U);
}

__funline __m512 _mm512_maskz_movehdup_ps(__mmask16 __U, __m512 __A) {
  return (__m512)__builtin_ia32_movshdup512_mask(
      (__v16sf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U);
}

__funline __m512 _mm512_moveldup_ps(__m512 __A) {
  return (__m512)__builtin_ia32_movsldup512_mask(
      (__v16sf)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1);
}

__funline __m512 _mm512_mask_moveldup_ps(__m512 __W, __mmask16 __U, __m512 __A) {
  return (__m512)__builtin_ia32_movsldup512_mask((__v16sf)__A, (__v16sf)__W,
                                                 (__mmask16)__U);
}

__funline __m512 _mm512_maskz_moveldup_ps(__mmask16 __U, __m512 __A) {
  return (__m512)__builtin_ia32_movsldup512_mask(
      (__v16sf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U);
}
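/*
 * Added usage sketch: movehdup/moveldup duplicate the odd/even float lanes
 * respectively; for lanes {a,b,c,d,...}:
 *
 *   _mm512_moveldup_ps(v);  // {a,a,c,c,...}
 *   _mm512_movehdup_ps(v);  // {b,b,d,d,...}
 *
 * A classic building block for complex multiplication.
 */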
__funline __m512i _mm512_or_si512(__m512i __A, __m512i __B) {
  return (__m512i)((__v16su)__A | (__v16su)__B);
}

__funline __m512i _mm512_or_epi32(__m512i __A, __m512i __B) {
  return (__m512i)((__v16su)__A | (__v16su)__B);
}

__funline __m512i _mm512_mask_or_epi32(__m512i __W, __mmask16 __U, __m512i __A,
                                       __m512i __B) {
  return (__m512i)__builtin_ia32_pord512_mask((__v16si)__A, (__v16si)__B,
                                              (__v16si)__W, (__mmask16)__U);
}

__funline __m512i _mm512_maskz_or_epi32(__mmask16 __U, __m512i __A,
                                        __m512i __B) {
  return (__m512i)__builtin_ia32_pord512_mask((__v16si)__A, (__v16si)__B,
                                              (__v16si)_mm512_setzero_si512(),
                                              (__mmask16)__U);
}

__funline __m512i _mm512_or_epi64(__m512i __A, __m512i __B) {
  return (__m512i)((__v8du)__A | (__v8du)__B);
}

__funline __m512i _mm512_mask_or_epi64(__m512i __W, __mmask8 __U, __m512i __A,
                                       __m512i __B) {
  return (__m512i)__builtin_ia32_porq512_mask((__v8di)__A, (__v8di)__B,
                                              (__v8di)__W, (__mmask8)__U);
}

__funline __m512i _mm512_maskz_or_epi64(__mmask8 __U, __m512i __A,
                                        __m512i __B) {
  return (__m512i)__builtin_ia32_porq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}

__funline __m512i _mm512_xor_si512(__m512i __A, __m512i __B) {
  return (__m512i)((__v16su)__A ^ (__v16su)__B);
}

__funline __m512i _mm512_xor_epi32(__m512i __A, __m512i __B) {
  return (__m512i)((__v16su)__A ^ (__v16su)__B);
}

__funline __m512i _mm512_mask_xor_epi32(__m512i __W, __mmask16 __U, __m512i __A,
                                        __m512i __B) {
  return (__m512i)__builtin_ia32_pxord512_mask((__v16si)__A, (__v16si)__B,
                                               (__v16si)__W, (__mmask16)__U);
}

__funline __m512i _mm512_maskz_xor_epi32(__mmask16 __U, __m512i __A,
                                         __m512i __B) {
  return (__m512i)__builtin_ia32_pxord512_mask((__v16si)__A, (__v16si)__B,
                                               (__v16si)_mm512_setzero_si512(),
                                               (__mmask16)__U);
}

__funline __m512i _mm512_xor_epi64(__m512i __A, __m512i __B) {
  return (__m512i)((__v8du)__A ^ (__v8du)__B);
}

__funline __m512i _mm512_mask_xor_epi64(__m512i __W, __mmask8 __U, __m512i __A,
                                        __m512i __B) {
  return (__m512i)__builtin_ia32_pxorq512_mask((__v8di)__A, (__v8di)__B,
                                               (__v8di)__W, (__mmask8)__U);
}

__funline __m512i _mm512_maskz_xor_epi64(__mmask8 __U, __m512i __A,
                                         __m512i __B) {
  return (__m512i)__builtin_ia32_pxorq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}
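/*
 * Added note: the rol/ror intrinsics below rotate each 32- or 64-bit lane
 * by an immediate count with wraparound; e.g. _mm512_rol_epi32(v, 8) moves
 * every byte of each dword up one position. Counts are taken modulo the
 * lane width. Commentary, not from upstream.
 */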
#ifdef __OPTIMIZE__
__funline __m512i _mm512_rol_epi32(__m512i __A, const int __B) {
  return (__m512i)__builtin_ia32_prold512_mask(
      (__v16si)__A, __B, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1);
}

__funline __m512i _mm512_mask_rol_epi32(__m512i __W, __mmask16 __U, __m512i __A,
                                        const int __B) {
  return (__m512i)__builtin_ia32_prold512_mask((__v16si)__A, __B, (__v16si)__W,
                                               (__mmask16)__U);
}

__funline __m512i _mm512_maskz_rol_epi32(__mmask16 __U, __m512i __A,
                                         const int __B) {
  return (__m512i)__builtin_ia32_prold512_mask(
      (__v16si)__A, __B, (__v16si)_mm512_setzero_si512(), (__mmask16)__U);
}

__funline __m512i _mm512_ror_epi32(__m512i __A, int __B) {
  return (__m512i)__builtin_ia32_prord512_mask(
      (__v16si)__A, __B, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1);
}

__funline __m512i _mm512_mask_ror_epi32(__m512i __W, __mmask16 __U, __m512i __A,
                                        int __B) {
  return (__m512i)__builtin_ia32_prord512_mask((__v16si)__A, __B, (__v16si)__W,
                                               (__mmask16)__U);
}

__funline __m512i _mm512_maskz_ror_epi32(__mmask16 __U, __m512i __A, int __B) {
  return (__m512i)__builtin_ia32_prord512_mask(
      (__v16si)__A, __B, (__v16si)_mm512_setzero_si512(), (__mmask16)__U);
}

__funline __m512i _mm512_rol_epi64(__m512i __A, const int __B) {
  return (__m512i)__builtin_ia32_prolq512_mask(
      (__v8di)__A, __B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1);
}

__funline __m512i _mm512_mask_rol_epi64(__m512i __W, __mmask8 __U, __m512i __A,
                                        const int __B) {
  return (__m512i)__builtin_ia32_prolq512_mask((__v8di)__A, __B, (__v8di)__W,
                                               (__mmask8)__U);
}

__funline __m512i _mm512_maskz_rol_epi64(__mmask8 __U, __m512i __A,
                                         const int __B) {
  return (__m512i)__builtin_ia32_prolq512_mask(
      (__v8di)__A, __B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}

__funline __m512i _mm512_ror_epi64(__m512i __A, int __B) {
  return (__m512i)__builtin_ia32_prorq512_mask(
      (__v8di)__A, __B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1);
}

__funline __m512i _mm512_mask_ror_epi64(__m512i __W, __mmask8 __U, __m512i __A,
                                        int __B) {
  return (__m512i)__builtin_ia32_prorq512_mask((__v8di)__A, __B, (__v8di)__W,
                                               (__mmask8)__U);
}

__funline __m512i _mm512_maskz_ror_epi64(__mmask8 __U, __m512i __A, int __B) {
  return (__m512i)__builtin_ia32_prorq512_mask(
      (__v8di)__A, __B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}
#else
#define _mm512_rol_epi32(A, B)                                            \
  ((__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(A), (int)(B), \
                                         (__v16si)_mm512_undefined_epi32(), \
                                         (__mmask16)(-1)))
#define _mm512_mask_rol_epi32(W, U, A, B)  \
  ((__m512i)__builtin_ia32_prold512_mask(  \
      (__v16si)(__m512i)(A), (int)(B), (__v16si)(__m512i)(W), (__mmask16)(U)))
#define _mm512_maskz_rol_epi32(U, A, B)                                   \
  ((__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(A), (int)(B), \
                                         (__v16si)_mm512_setzero_si512(), \
                                         (__mmask16)(U)))
#define _mm512_ror_epi32(A, B)                                            \
  ((__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \
                                         (__v16si)_mm512_undefined_epi32(), \
                                         (__mmask16)(-1)))
#define _mm512_mask_ror_epi32(W, U, A, B)  \
  ((__m512i)__builtin_ia32_prord512_mask(  \
      (__v16si)(__m512i)(A), (int)(B), (__v16si)(__m512i)(W), (__mmask16)(U)))
#define _mm512_maskz_ror_epi32(U, A, B)                                   \
  ((__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \
                                         (__v16si)_mm512_setzero_si512(), \
                                         (__mmask16)(U)))
#define _mm512_rol_epi64(A, B)                                           \
  ((__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(A), (int)(B), \
                                         (__v8di)_mm512_undefined_epi32(), \
                                         (__mmask8)(-1)))
#define _mm512_mask_rol_epi64(W, U, A, B)                                \
  ((__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(A), (int)(B), \
                                         (__v8di)(__m512i)(W), (__mmask8)(U)))
#define _mm512_maskz_rol_epi64(U, A, B)                                  \
  ((__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(A), (int)(B), \
                                         (__v8di)_mm512_setzero_si512(), \
                                         (__mmask8)(U)))
#define _mm512_ror_epi64(A, B)                                           \
  ((__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \
                                         (__v8di)_mm512_undefined_epi32(), \
                                         (__mmask8)(-1)))
#define _mm512_mask_ror_epi64(W, U, A, B)                                \
  ((__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \
                                         (__v8di)(__m512i)(W), (__mmask8)(U)))
#define _mm512_maskz_ror_epi64(U, A, B)                                  \
  ((__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \
                                         (__v8di)_mm512_setzero_si512(), \
                                         (__mmask8)(U)))
#endif

__funline __m512i _mm512_and_si512(__m512i __A, __m512i __B) {
  return (__m512i)((__v16su)__A & (__v16su)__B);
}

__funline __m512i _mm512_and_epi32(__m512i __A, __m512i __B) {
  return (__m512i)((__v16su)__A & (__v16su)__B);
}

__funline __m512i _mm512_mask_and_epi32(__m512i __W, __mmask16 __U, __m512i __A,
                                        __m512i __B) {
  return (__m512i)__builtin_ia32_pandd512_mask((__v16si)__A, (__v16si)__B,
                                               (__v16si)__W, (__mmask16)__U);
}

__funline __m512i _mm512_maskz_and_epi32(__mmask16 __U, __m512i __A,
                                         __m512i __B) {
  return (__m512i)__builtin_ia32_pandd512_mask((__v16si)__A, (__v16si)__B,
                                               (__v16si)_mm512_setzero_si512(),
                                               (__mmask16)__U);
}

__funline __m512i _mm512_and_epi64(__m512i __A, __m512i __B) {
  return (__m512i)((__v8du)__A & (__v8du)__B);
}

__funline __m512i _mm512_mask_and_epi64(__m512i __W, __mmask8 __U, __m512i __A,
                                        __m512i __B) {
  return (__m512i)__builtin_ia32_pandq512_mask((__v8di)__A, (__v8di)__B,
                                               (__v8di)__W, __U);
}
__funline __m512i _mm512_maskz_and_epi64(__mmask8 __U, __m512i __A,
                                         __m512i __B) {
  return (__m512i)__builtin_ia32_pandq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_setzero_si512(), __U);
}

__funline __m512i _mm512_andnot_si512(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_pandnd512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)_mm512_undefined_epi32(),
      (__mmask16)-1);
}

__funline __m512i _mm512_andnot_epi32(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_pandnd512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)_mm512_undefined_epi32(),
      (__mmask16)-1);
}

__funline __m512i _mm512_mask_andnot_epi32(__m512i __W, __mmask16 __U,
                                           __m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_pandnd512_mask((__v16si)__A, (__v16si)__B,
                                                (__v16si)__W, (__mmask16)__U);
}

__funline __m512i _mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A,
                                            __m512i __B) {
  return (__m512i)__builtin_ia32_pandnd512_mask((__v16si)__A, (__v16si)__B,
                                                (__v16si)_mm512_setzero_si512(),
                                                (__mmask16)__U);
}

__funline __m512i _mm512_andnot_epi64(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_pandnq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1);
}

__funline __m512i _mm512_mask_andnot_epi64(__m512i __W, __mmask8 __U,
                                           __m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_pandnq512_mask((__v8di)__A, (__v8di)__B,
                                                (__v8di)__W, __U);
}

__funline __m512i _mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A,
                                            __m512i __B) {
  return (__m512i)__builtin_ia32_pandnq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_setzero_si512(), __U);
}

__funline __mmask16 _mm512_test_epi32_mask(__m512i __A, __m512i __B) {
  return (__mmask16)__builtin_ia32_ptestmd512((__v16si)__A, (__v16si)__B,
                                              (__mmask16)-1);
}

__funline __mmask16 _mm512_mask_test_epi32_mask(__mmask16 __U, __m512i __A,
                                                __m512i __B) {
  return (__mmask16)__builtin_ia32_ptestmd512((__v16si)__A, (__v16si)__B, __U);
}

__funline __mmask8 _mm512_test_epi64_mask(__m512i __A, __m512i __B) {
  return (__mmask8)__builtin_ia32_ptestmq512((__v8di)__A, (__v8di)__B,
                                             (__mmask8)-1);
}

__funline __mmask8 _mm512_mask_test_epi64_mask(__mmask8 __U, __m512i __A,
                                               __m512i __B) {
  return (__mmask8)__builtin_ia32_ptestmq512((__v8di)__A, (__v8di)__B, __U);
}

__funline __mmask16 _mm512_testn_epi32_mask(__m512i __A, __m512i __B) {
  return (__mmask16)__builtin_ia32_ptestnmd512((__v16si)__A, (__v16si)__B,
                                               (__mmask16)-1);
}

__funline __mmask16 _mm512_mask_testn_epi32_mask(__mmask16 __U, __m512i __A,
                                                 __m512i __B) {
  return (__mmask16)__builtin_ia32_ptestnmd512((__v16si)__A, (__v16si)__B,
                                               __U);
}

__funline __mmask8 _mm512_testn_epi64_mask(__m512i __A, __m512i __B) {
  return (__mmask8)__builtin_ia32_ptestnmq512((__v8di)__A, (__v8di)__B,
                                              (__mmask8)-1);
}

__funline __mmask8 _mm512_mask_testn_epi64_mask(__mmask8 __U, __m512i __A,
                                                __m512i __B) {
  return (__mmask8)__builtin_ia32_ptestnmq512((__v8di)__A, (__v8di)__B, __U);
}

__funline __m512 _mm512_abs_ps(__m512 __A) {
  return (__m512)_mm512_and_epi32((__m512i)__A, _mm512_set1_epi32(0x7fffffff));
}

__funline __m512 _mm512_mask_abs_ps(__m512 __W, __mmask16 __U, __m512 __A) {
  return (__m512)_mm512_mask_and_epi32((__m512i)__W, __U, (__m512i)__A,
                                       _mm512_set1_epi32(0x7fffffff));
}

__funline __m512d _mm512_abs_pd(__m512d __A) {
  return (__m512d)_mm512_and_epi64((__m512i)__A,
                                   _mm512_set1_epi64(0x7fffffffffffffffLL));
}

__funline __m512d _mm512_mask_abs_pd(__m512d __W, __mmask8 __U, __m512d __A) {
  return (__m512d)_mm512_mask_and_epi64(
      (__m512i)__W, __U, (__m512i)__A, _mm512_set1_epi64(0x7fffffffffffffffLL));
}
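/*
 * Added note: _mm512_abs_ps/_mm512_abs_pd are implemented above as a plain
 * bitwise AND that clears the IEEE-754 sign bit (0x7fffffff /
 * 0x7fffffffffffffff), so they never raise FP exceptions, even for NaN
 * inputs. Commentary, not from upstream.
 */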
__funline __m512i _mm512_unpackhi_epi32(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_punpckhdq512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)_mm512_undefined_epi32(),
      (__mmask16)-1);
}

__funline __m512i _mm512_mask_unpackhi_epi32(__m512i __W, __mmask16 __U,
                                             __m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_punpckhdq512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)__W, (__mmask16)__U);
}

__funline __m512i _mm512_maskz_unpackhi_epi32(__mmask16 __U, __m512i __A,
                                              __m512i __B) {
  return (__m512i)__builtin_ia32_punpckhdq512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)_mm512_setzero_si512(),
      (__mmask16)__U);
}

__funline __m512i _mm512_unpackhi_epi64(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_punpckhqdq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1);
}

__funline __m512i _mm512_mask_unpackhi_epi64(__m512i __W, __mmask8 __U,
                                             __m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_punpckhqdq512_mask((__v8di)__A, (__v8di)__B,
                                                    (__v8di)__W, (__mmask8)__U);
}

__funline __m512i _mm512_maskz_unpackhi_epi64(__mmask8 __U, __m512i __A,
                                              __m512i __B) {
  return (__m512i)__builtin_ia32_punpckhqdq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}

__funline __m512i _mm512_unpacklo_epi32(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_punpckldq512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)_mm512_undefined_epi32(),
      (__mmask16)-1);
}

__funline __m512i _mm512_mask_unpacklo_epi32(__m512i __W, __mmask16 __U,
                                             __m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_punpckldq512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)__W, (__mmask16)__U);
}

__funline __m512i _mm512_maskz_unpacklo_epi32(__mmask16 __U, __m512i __A,
                                              __m512i __B) {
  return (__m512i)__builtin_ia32_punpckldq512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)_mm512_setzero_si512(),
      (__mmask16)__U);
}

__funline __m512i _mm512_unpacklo_epi64(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_punpcklqdq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1);
}

__funline __m512i _mm512_mask_unpacklo_epi64(__m512i __W, __mmask8 __U,
                                             __m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_punpcklqdq512_mask((__v8di)__A, (__v8di)__B,
                                                    (__v8di)__W, (__mmask8)__U);
}

__funline __m512i _mm512_maskz_unpacklo_epi64(__mmask8 __U, __m512i __A,
                                              __m512i __B) {
  return (__m512i)__builtin_ia32_punpcklqdq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}
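/*
 * Added usage sketch: the unpack intrinsics interleave lanes within each
 * 128-bit chunk; for dwords {a0,a1,a2,a3,...} and {b0,b1,b2,b3,...}:
 *
 *   _mm512_unpacklo_epi32(a, b);  // {a0,b0,a1,b1, ...} per 128-bit lane
 *   _mm512_unpackhi_epi32(a, b);  // {a2,b2,a3,b3, ...} per 128-bit lane
 */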
#ifdef __x86_64__
#ifdef __OPTIMIZE__
__funline unsigned long long _mm_cvt_roundss_u64(__m128 __A, const int __R) {
  return (unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)__A, __R);
}

__funline long long _mm_cvt_roundss_si64(__m128 __A, const int __R) {
  return (long long)__builtin_ia32_vcvtss2si64((__v4sf)__A, __R);
}

__funline long long _mm_cvt_roundss_i64(__m128 __A, const int __R) {
  return (long long)__builtin_ia32_vcvtss2si64((__v4sf)__A, __R);
}

__funline unsigned long long _mm_cvtt_roundss_u64(__m128 __A, const int __R) {
  return (unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)__A, __R);
}

__funline long long _mm_cvtt_roundss_i64(__m128 __A, const int __R) {
  return (long long)__builtin_ia32_vcvttss2si64((__v4sf)__A, __R);
}

__funline long long _mm_cvtt_roundss_si64(__m128 __A, const int __R) {
  return (long long)__builtin_ia32_vcvttss2si64((__v4sf)__A, __R);
}
#else
#define _mm_cvt_roundss_u64(A, B) \
  ((unsigned long long)__builtin_ia32_vcvtss2usi64(A, B))
#define _mm_cvt_roundss_si64(A, B) ((long long)__builtin_ia32_vcvtss2si64(A, B))
#define _mm_cvt_roundss_i64(A, B) ((long long)__builtin_ia32_vcvtss2si64(A, B))
#define _mm_cvtt_roundss_u64(A, B) \
  ((unsigned long long)__builtin_ia32_vcvttss2usi64(A, B))
#define _mm_cvtt_roundss_i64(A, B) \
  ((long long)__builtin_ia32_vcvttss2si64(A, B))
#define _mm_cvtt_roundss_si64(A, B) \
  ((long long)__builtin_ia32_vcvttss2si64(A, B))
#endif
#endif

#ifdef __OPTIMIZE__
__funline unsigned _mm_cvt_roundss_u32(__m128 __A, const int __R) {
  return (unsigned)__builtin_ia32_vcvtss2usi32((__v4sf)__A, __R);
}

__funline int _mm_cvt_roundss_si32(__m128 __A, const int __R) {
  return (int)__builtin_ia32_vcvtss2si32((__v4sf)__A, __R);
}

__funline int _mm_cvt_roundss_i32(__m128 __A, const int __R) {
  return (int)__builtin_ia32_vcvtss2si32((__v4sf)__A, __R);
}

__funline unsigned _mm_cvtt_roundss_u32(__m128 __A, const int __R) {
  return (unsigned)__builtin_ia32_vcvttss2usi32((__v4sf)__A, __R);
}

__funline int _mm_cvtt_roundss_i32(__m128 __A, const int __R) {
  return (int)__builtin_ia32_vcvttss2si32((__v4sf)__A, __R);
}

__funline int _mm_cvtt_roundss_si32(__m128 __A, const int __R) {
  return (int)__builtin_ia32_vcvttss2si32((__v4sf)__A, __R);
}
#else
#define _mm_cvt_roundss_u32(A, B) ((unsigned)__builtin_ia32_vcvtss2usi32(A, B))
#define _mm_cvt_roundss_si32(A, B) ((int)__builtin_ia32_vcvtss2si32(A, B))
#define _mm_cvt_roundss_i32(A, B) ((int)__builtin_ia32_vcvtss2si32(A, B))
#define _mm_cvtt_roundss_u32(A, B) \
  ((unsigned)__builtin_ia32_vcvttss2usi32(A, B))
#define _mm_cvtt_roundss_si32(A, B) ((int)__builtin_ia32_vcvttss2si32(A, B))
#define _mm_cvtt_roundss_i32(A, B) ((int)__builtin_ia32_vcvttss2si32(A, B))
#endif

#ifdef __x86_64__
#ifdef __OPTIMIZE__
__funline unsigned long long _mm_cvt_roundsd_u64(__m128d __A, const int __R) {
  return (unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)__A, __R);
}

__funline long long _mm_cvt_roundsd_si64(__m128d __A, const int __R) {
  return (long long)__builtin_ia32_vcvtsd2si64((__v2df)__A, __R);
}

__funline long long _mm_cvt_roundsd_i64(__m128d __A, const int __R) {
  return (long long)__builtin_ia32_vcvtsd2si64((__v2df)__A, __R);
}

__funline unsigned long long _mm_cvtt_roundsd_u64(__m128d __A, const int __R) {
  return (unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)__A, __R);
}

__funline long long _mm_cvtt_roundsd_si64(__m128d __A, const int __R) {
  return (long long)__builtin_ia32_vcvttsd2si64((__v2df)__A, __R);
}

__funline long long _mm_cvtt_roundsd_i64(__m128d __A, const int __R) {
  return (long long)__builtin_ia32_vcvttsd2si64((__v2df)__A, __R);
}
#else
#define _mm_cvt_roundsd_u64(A, B) \
  ((unsigned long long)__builtin_ia32_vcvtsd2usi64(A, B))
#define _mm_cvt_roundsd_si64(A, B) ((long long)__builtin_ia32_vcvtsd2si64(A, B))
#define _mm_cvt_roundsd_i64(A, B) ((long long)__builtin_ia32_vcvtsd2si64(A, B))
#define _mm_cvtt_roundsd_u64(A, B) \
  ((unsigned long long)__builtin_ia32_vcvttsd2usi64(A, B))
#define _mm_cvtt_roundsd_si64(A, B) \
  ((long long)__builtin_ia32_vcvttsd2si64(A, B))
#define _mm_cvtt_roundsd_i64(A, B) \
  ((long long)__builtin_ia32_vcvttsd2si64(A, B))
#endif
#endif

#ifdef __OPTIMIZE__
__funline unsigned _mm_cvt_roundsd_u32(__m128d __A, const int __R) {
  return (unsigned)__builtin_ia32_vcvtsd2usi32((__v2df)__A, __R);
}

__funline int _mm_cvt_roundsd_si32(__m128d __A, const int __R) {
  return (int)__builtin_ia32_vcvtsd2si32((__v2df)__A, __R);
}

__funline int _mm_cvt_roundsd_i32(__m128d __A, const int __R) {
  return (int)__builtin_ia32_vcvtsd2si32((__v2df)__A, __R);
}
__funline unsigned _mm_cvtt_roundsd_u32(__m128d __A, const int __R) {
  return (unsigned)__builtin_ia32_vcvttsd2usi32((__v2df)__A, __R);
}

__funline int _mm_cvtt_roundsd_i32(__m128d __A, const int __R) {
  return (int)__builtin_ia32_vcvttsd2si32((__v2df)__A, __R);
}

__funline int _mm_cvtt_roundsd_si32(__m128d __A, const int __R) {
  return (int)__builtin_ia32_vcvttsd2si32((__v2df)__A, __R);
}
#else
#define _mm_cvt_roundsd_u32(A, B) ((unsigned)__builtin_ia32_vcvtsd2usi32(A, B))
#define _mm_cvt_roundsd_si32(A, B) ((int)__builtin_ia32_vcvtsd2si32(A, B))
#define _mm_cvt_roundsd_i32(A, B) ((int)__builtin_ia32_vcvtsd2si32(A, B))
#define _mm_cvtt_roundsd_u32(A, B) \
  ((unsigned)__builtin_ia32_vcvttsd2usi32(A, B))
#define _mm_cvtt_roundsd_si32(A, B) ((int)__builtin_ia32_vcvttsd2si32(A, B))
#define _mm_cvtt_roundsd_i32(A, B) ((int)__builtin_ia32_vcvttsd2si32(A, B))
#endif

__funline __m512d _mm512_movedup_pd(__m512d __A) {
  return (__m512d)__builtin_ia32_movddup512_mask(
      (__v8df)__A, (__v8df)_mm512_undefined_pd(), (__mmask8)-1);
}

__funline __m512d _mm512_mask_movedup_pd(__m512d __W, __mmask8 __U,
                                         __m512d __A) {
  return (__m512d)__builtin_ia32_movddup512_mask((__v8df)__A, (__v8df)__W,
                                                 (__mmask8)__U);
}

__funline __m512d _mm512_maskz_movedup_pd(__mmask8 __U, __m512d __A) {
  return (__m512d)__builtin_ia32_movddup512_mask(
      (__v8df)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U);
}

__funline __m512d _mm512_unpacklo_pd(__m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_unpcklpd512_mask(
      (__v8df)__A, (__v8df)__B, (__v8df)_mm512_undefined_pd(), (__mmask8)-1);
}

__funline __m512d _mm512_mask_unpacklo_pd(__m512d __W, __mmask8 __U,
                                          __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_unpcklpd512_mask((__v8df)__A, (__v8df)__B,
                                                  (__v8df)__W, (__mmask8)__U);
}

__funline __m512d _mm512_maskz_unpacklo_pd(__mmask8 __U, __m512d __A,
                                           __m512d __B) {
  return (__m512d)__builtin_ia32_unpcklpd512_mask(
      (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U);
}

__funline __m512d _mm512_unpackhi_pd(__m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_unpckhpd512_mask(
      (__v8df)__A, (__v8df)__B, (__v8df)_mm512_undefined_pd(), (__mmask8)-1);
}

__funline __m512d _mm512_mask_unpackhi_pd(__m512d __W, __mmask8 __U,
                                          __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_unpckhpd512_mask((__v8df)__A, (__v8df)__B,
                                                  (__v8df)__W, (__mmask8)__U);
}

__funline __m512d _mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A,
                                           __m512d __B) {
  return (__m512d)__builtin_ia32_unpckhpd512_mask(
      (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U);
}

__funline __m512 _mm512_unpackhi_ps(__m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_unpckhps512_mask((__v16sf)__A, (__v16sf)__B,
                                                 (__v16sf)_mm512_undefined_ps(),
                                                 (__mmask16)-1);
}

__funline __m512 _mm512_mask_unpackhi_ps(__m512 __W, __mmask16 __U, __m512 __A,
                                         __m512 __B) {
  return (__m512)__builtin_ia32_unpckhps512_mask((__v16sf)__A, (__v16sf)__B,
                                                 (__v16sf)__W, (__mmask16)__U);
}

__funline __m512 _mm512_maskz_unpackhi_ps(__mmask16 __U, __m512 __A,
                                          __m512 __B) {
  return (__m512)__builtin_ia32_unpckhps512_mask(
      (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U);
}
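/*
 * Added note: the block below covers float<->double and float<->half
 * conversions. For _mm512_cvt_roundps_ph the immediate __I packs the
 * rounding mode for the float→fp16 narrowing, as with _mm256_cvtps_ph.
 * For the truncating cvtt forms above, truncation is implicit and the
 * round argument effectively only carries _MM_FROUND_NO_EXC (SAE).
 */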
#ifdef __OPTIMIZE__
__funline __m512d _mm512_cvt_roundps_pd(__m256 __A, const int __R) {
  return (__m512d)__builtin_ia32_cvtps2pd512_mask(
      (__v8sf)__A, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, __R);
}

__funline __m512d _mm512_mask_cvt_roundps_pd(__m512d __W, __mmask8 __U,
                                             __m256 __A, const int __R) {
  return (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)__A, (__v8df)__W,
                                                  (__mmask8)__U, __R);
}

__funline __m512d _mm512_maskz_cvt_roundps_pd(__mmask8 __U, __m256 __A,
                                              const int __R) {
  return (__m512d)__builtin_ia32_cvtps2pd512_mask(
      (__v8sf)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, __R);
}

__funline __m512 _mm512_cvt_roundph_ps(__m256i __A, const int __R) {
  return (__m512)__builtin_ia32_vcvtph2ps512_mask(
      (__v16hi)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, __R);
}

__funline __m512 _mm512_mask_cvt_roundph_ps(__m512 __W, __mmask16 __U,
                                            __m256i __A, const int __R) {
  return (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)__A, (__v16sf)__W,
                                                  (__mmask16)__U, __R);
}

__funline __m512 _mm512_maskz_cvt_roundph_ps(__mmask16 __U, __m256i __A,
                                             const int __R) {
  return (__m512)__builtin_ia32_vcvtph2ps512_mask(
      (__v16hi)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, __R);
}

__funline __m256i _mm512_cvt_roundps_ph(__m512 __A, const int __I) {
  return (__m256i)__builtin_ia32_vcvtps2ph512_mask(
      (__v16sf)__A, __I, (__v16hi)_mm256_undefined_si256(), -1);
}

__funline __m256i _mm512_cvtps_ph(__m512 __A, const int __I) {
  return (__m256i)__builtin_ia32_vcvtps2ph512_mask(
      (__v16sf)__A, __I, (__v16hi)_mm256_undefined_si256(), -1);
}

__funline __m256i _mm512_mask_cvt_roundps_ph(__m256i __U, __mmask16 __W,
                                             __m512 __A, const int __I) {
  return (__m256i)__builtin_ia32_vcvtps2ph512_mask(
      (__v16sf)__A, __I, (__v16hi)__U, (__mmask16)__W);
}

__funline __m256i _mm512_mask_cvtps_ph(__m256i __U, __mmask16 __W, __m512 __A,
                                       const int __I) {
  return (__m256i)__builtin_ia32_vcvtps2ph512_mask(
      (__v16sf)__A, __I, (__v16hi)__U, (__mmask16)__W);
}

__funline __m256i _mm512_maskz_cvt_roundps_ph(__mmask16 __W, __m512 __A,
                                              const int __I) {
  return (__m256i)__builtin_ia32_vcvtps2ph512_mask(
      (__v16sf)__A, __I, (__v16hi)_mm256_setzero_si256(), (__mmask16)__W);
}

__funline __m256i _mm512_maskz_cvtps_ph(__mmask16 __W, __m512 __A,
                                        const int __I) {
  return (__m256i)__builtin_ia32_vcvtps2ph512_mask(
      (__v16sf)__A, __I, (__v16hi)_mm256_setzero_si256(), (__mmask16)__W);
}
#else
#define _mm512_cvt_roundps_pd(A, B) \
  (__m512d)                         \
      __builtin_ia32_cvtps2pd512_mask(A, (__v8df)_mm512_undefined_pd(), -1, B)
#define _mm512_mask_cvt_roundps_pd(W, U, A, B) \
  (__m512d) __builtin_ia32_cvtps2pd512_mask(A, (__v8df)(W), U, B)
#define _mm512_maskz_cvt_roundps_pd(U, A, B) \
  (__m512d)                                  \
      __builtin_ia32_cvtps2pd512_mask(A, (__v8df)_mm512_setzero_pd(), U, B)
#define _mm512_cvt_roundph_ps(A, B)          \
  (__m512) __builtin_ia32_vcvtph2ps512_mask( \
      (__v16hi)(A), (__v16sf)_mm512_undefined_ps(), -1, B)
#define _mm512_mask_cvt_roundph_ps(W, U, A, B) \
  (__m512) __builtin_ia32_vcvtph2ps512_mask((__v16hi)(A), (__v16sf)(W), U, B)
#define _mm512_maskz_cvt_roundph_ps(U, A, B) \
  (__m512) __builtin_ia32_vcvtph2ps512_mask( \
      (__v16hi)(A), (__v16sf)_mm512_setzero_ps(), U, B)
#define _mm512_cvt_roundps_ph(A, I)             \
  ((__m256i)__builtin_ia32_vcvtps2ph512_mask(   \
      (__v16sf)(__m512)A, (int)(I), (__v16hi)_mm256_undefined_si256(), -1))
#define _mm512_cvtps_ph(A, I)                   \
  ((__m256i)__builtin_ia32_vcvtps2ph512_mask(   \
      (__v16sf)(__m512)A, (int)(I), (__v16hi)_mm256_undefined_si256(), -1))
#define _mm512_mask_cvt_roundps_ph(U, W, A, I)  \
  ((__m256i)__builtin_ia32_vcvtps2ph512_mask(   \
      (__v16sf)(__m512)A, (int)(I), (__v16hi)(__m256i)(U), (__mmask16)(W)))
#define _mm512_mask_cvtps_ph(U, W, A, I)        \
  ((__m256i)__builtin_ia32_vcvtps2ph512_mask(   \
      (__v16sf)(__m512)A, (int)(I), (__v16hi)(__m256i)(U), (__mmask16)(W)))
#define _mm512_maskz_cvt_roundps_ph(W, A, I)                                 \
  ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)A, (int)(I),   \
                                             (__v16hi)_mm256_setzero_si256(), \
                                             (__mmask16)(W)))
#define _mm512_maskz_cvtps_ph(W, A, I) \
  ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)A, (int)(I), \
                                             (__v16hi)_mm256_setzero_si256(), \
                                             (__mmask16)(W)))
#endif

#ifdef __OPTIMIZE__
__funline __m256 _mm512_cvt_roundpd_ps(__m512d __A, const int __R) {
  return (__m256)__builtin_ia32_cvtpd2ps512_mask(
      (__v8df)__A, (__v8sf)_mm256_undefined_ps(), (__mmask8)-1, __R);
}

__funline __m256 _mm512_mask_cvt_roundpd_ps(__m256 __W, __mmask8 __U,
                                          __m512d __A, const int __R) {
  return (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)__A, (__v8sf)__W,
                                                 (__mmask8)__U, __R);
}

__funline __m256 _mm512_maskz_cvt_roundpd_ps(__mmask8 __U, __m512d __A,
                                           const int __R) {
  return (__m256)__builtin_ia32_cvtpd2ps512_mask(
      (__v8df)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U, __R);
}

__funline __m128 _mm_cvt_roundsd_ss(__m128 __A, __m128d __B, const int __R) {
  return (__m128)__builtin_ia32_cvtsd2ss_round((__v4sf)__A, (__v2df)__B, __R);
}

__funline __m128d _mm_cvt_roundss_sd(__m128d __A, __m128 __B, const int __R) {
  return (__m128d)__builtin_ia32_cvtss2sd_round((__v2df)__A, (__v4sf)__B, __R);
}

#else
#define _mm512_cvt_roundpd_ps(A, B) \
  (__m256) \
      __builtin_ia32_cvtpd2ps512_mask(A, (__v8sf)_mm256_undefined_ps(), -1, B)
#define _mm512_mask_cvt_roundpd_ps(W, U, A, B) \
  (__m256) __builtin_ia32_cvtpd2ps512_mask(A, (__v8sf)(W), U, B)
#define _mm512_maskz_cvt_roundpd_ps(U, A, B) \
  (__m256) __builtin_ia32_cvtpd2ps512_mask(A, (__v8sf)_mm256_setzero_ps(), U, B)
#define _mm_cvt_roundsd_ss(A, B, C) \
  (__m128) __builtin_ia32_cvtsd2ss_round(A, B, C)
#define _mm_cvt_roundss_sd(A, B, C) \
  (__m128d) __builtin_ia32_cvtss2sd_round(A, B, C)
#endif

__funline void _mm512_stream_si512(__m512i *__P, __m512i __A) {
  __builtin_ia32_movntdq512((__v8di *)__P, (__v8di)__A);
}

__funline void _mm512_stream_ps(float *__P, __m512 __A) {
  __builtin_ia32_movntps512(__P, (__v16sf)__A);
}

__funline void _mm512_stream_pd(double *__P, __m512d __A) {
  __builtin_ia32_movntpd512(__P, (__v8df)__A);
}

__funline __m512i _mm512_stream_load_si512(void *__P) {
  return __builtin_ia32_movntdqa512((__v8di *)__P);
}

typedef enum {
  _MM_MANT_NORM_1_2,
  _MM_MANT_NORM_p5_2,
  _MM_MANT_NORM_p5_1,
  _MM_MANT_NORM_p75_1p5
} _MM_MANTISSA_NORM_ENUM;

typedef enum {
  _MM_MANT_SIGN_src,
  _MM_MANT_SIGN_zero,
  _MM_MANT_SIGN_nan
} _MM_MANTISSA_SIGN_ENUM;

#ifdef __OPTIMIZE__
__funline __m128 _mm_getexp_round_ss(__m128 __A, __m128 __B, const int __R) {
  return (__m128)__builtin_ia32_getexpss128_round((__v4sf)__A, (__v4sf)__B,
                                                  __R);
}

__funline __m128 _mm_mask_getexp_round_ss(__m128 __W, __mmask8 __U, __m128 __A,
                                        __m128 __B, const int __R) {
  return (__m128)__builtin_ia32_getexpss_mask_round(
      (__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U, __R);
}

__funline __m128 _mm_maskz_getexp_round_ss(__mmask8 __U, __m128 __A, __m128 __B,
                                         const int __R) {
  return (__m128)__builtin_ia32_getexpss_mask_round(
      (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, __R);
}

__funline __m128d _mm_getexp_round_sd(__m128d __A, __m128d __B, const int __R) {
  return (__m128d)__builtin_ia32_getexpsd128_round((__v2df)__A, (__v2df)__B,
                                                   __R);
}

__funline __m128d _mm_mask_getexp_round_sd(__m128d __W, __mmask8 __U,
                                         __m128d __A, __m128d __B,
                                         const int __R) {
  return (__m128d)__builtin_ia32_getexpsd_mask_round(
      (__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U, __R);
}

__funline __m128d _mm_maskz_getexp_round_sd(__mmask8 __U, __m128d __A,
                                          __m128d __B, const int __R) {
  return (__m128d)__builtin_ia32_getexpsd_mask_round(
      (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, __R);
}
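/* Note (illustrative, not from the upstream header): getexp returns
   floor(log2(|x|)) as a floating-point value, so, for example,
     __m128 e = _mm_getexp_round_ss(a, _mm_set_ss(8.0f),
                                    _MM_FROUND_CUR_DIRECTION);
   leaves 3.0f in the low lane (8 = 2^3), with the upper lanes taken
   from a.  */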
__funline __m512 _mm512_getexp_round_ps(__m512 __A, const int __R) {
  return (__m512)__builtin_ia32_getexpps512_mask(
      (__v16sf)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, __R);
}

__funline __m512 _mm512_mask_getexp_round_ps(__m512 __W, __mmask16 __U,
                                           __m512 __A, const int __R) {
  return (__m512)__builtin_ia32_getexpps512_mask((__v16sf)__A, (__v16sf)__W,
                                                 (__mmask16)__U, __R);
}

__funline __m512 _mm512_maskz_getexp_round_ps(__mmask16 __U, __m512 __A,
                                            const int __R) {
  return (__m512)__builtin_ia32_getexpps512_mask(
      (__v16sf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, __R);
}

__funline __m512d _mm512_getexp_round_pd(__m512d __A, const int __R) {
  return (__m512d)__builtin_ia32_getexppd512_mask(
      (__v8df)__A, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, __R);
}

__funline __m512d _mm512_mask_getexp_round_pd(__m512d __W, __mmask8 __U,
                                            __m512d __A, const int __R) {
  return (__m512d)__builtin_ia32_getexppd512_mask((__v8df)__A, (__v8df)__W,
                                                  (__mmask8)__U, __R);
}

__funline __m512d _mm512_maskz_getexp_round_pd(__mmask8 __U, __m512d __A,
                                             const int __R) {
  return (__m512d)__builtin_ia32_getexppd512_mask(
      (__v8df)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, __R);
}

__funline __m512d _mm512_getmant_round_pd(__m512d __A, _MM_MANTISSA_NORM_ENUM __B,
                                        _MM_MANTISSA_SIGN_ENUM __C,
                                        const int __R) {
  return (__m512d)__builtin_ia32_getmantpd512_mask(
      (__v8df)__A, (__C << 2) | __B, _mm512_undefined_pd(), (__mmask8)-1, __R);
}

__funline __m512d _mm512_mask_getmant_round_pd(__m512d __W, __mmask8 __U,
                                             __m512d __A,
                                             _MM_MANTISSA_NORM_ENUM __B,
                                             _MM_MANTISSA_SIGN_ENUM __C,
                                             const int __R) {
  return (__m512d)__builtin_ia32_getmantpd512_mask(
      (__v8df)__A, (__C << 2) | __B, (__v8df)__W, __U, __R);
}

__funline __m512d _mm512_maskz_getmant_round_pd(__mmask8 __U, __m512d __A,
                                              _MM_MANTISSA_NORM_ENUM __B,
                                              _MM_MANTISSA_SIGN_ENUM __C,
                                              const int __R) {
  return (__m512d)__builtin_ia32_getmantpd512_mask(
      (__v8df)__A, (__C << 2) | __B, (__v8df)_mm512_setzero_pd(), __U, __R);
}

__funline __m512 _mm512_getmant_round_ps(__m512 __A, _MM_MANTISSA_NORM_ENUM __B,
                                       _MM_MANTISSA_SIGN_ENUM __C,
                                       const int __R) {
  return (__m512)__builtin_ia32_getmantps512_mask(
      (__v16sf)__A, (__C << 2) | __B, _mm512_undefined_ps(), (__mmask16)-1,
      __R);
}

__funline __m512 _mm512_mask_getmant_round_ps(__m512 __W, __mmask16 __U,
                                            __m512 __A,
                                            _MM_MANTISSA_NORM_ENUM __B,
                                            _MM_MANTISSA_SIGN_ENUM __C,
                                            const int __R) {
  return (__m512)__builtin_ia32_getmantps512_mask(
      (__v16sf)__A, (__C << 2) | __B, (__v16sf)__W, __U, __R);
}

__funline __m512 _mm512_maskz_getmant_round_ps(__mmask16 __U, __m512 __A,
                                             _MM_MANTISSA_NORM_ENUM __B,
                                             _MM_MANTISSA_SIGN_ENUM __C,
                                             const int __R) {
  return (__m512)__builtin_ia32_getmantps512_mask(
      (__v16sf)__A, (__C << 2) | __B, (__v16sf)_mm512_setzero_ps(), __U, __R);
}

__funline __m128d _mm_getmant_round_sd(__m128d __A, __m128d __B,
                                     _MM_MANTISSA_NORM_ENUM __C,
                                     _MM_MANTISSA_SIGN_ENUM __D,
                                     const int __R) {
  return (__m128d)__builtin_ia32_getmantsd_round((__v2df)__A, (__v2df)__B,
                                                 (__D << 2) | __C, __R);
}

__funline __m128d _mm_mask_getmant_round_sd(__m128d __W, __mmask8 __U,
                                          __m128d __A, __m128d __B,
                                          _MM_MANTISSA_NORM_ENUM __C,
                                          _MM_MANTISSA_SIGN_ENUM __D,
                                          const int __R) {
  return (__m128d)__builtin_ia32_getmantsd_mask_round(
      (__v2df)__A, (__v2df)__B, (__D << 2) | __C, (__v2df)__W, __U, __R);
}

__funline __m128d _mm_maskz_getmant_round_sd(__mmask8 __U, __m128d __A,
                                           __m128d __B,
                                           _MM_MANTISSA_NORM_ENUM __C,
                                           _MM_MANTISSA_SIGN_ENUM __D,
                                           const int __R) {
  return (__m128d)__builtin_ia32_getmantsd_mask_round(
      (__v2df)__A, (__v2df)__B, (__D << 2) | __C, (__v2df)_mm_setzero_pd(),
      __U, __R);
}
__funline __m128 _mm_getmant_round_ss(__m128 __A, __m128 __B,
                                    _MM_MANTISSA_NORM_ENUM __C,
                                    _MM_MANTISSA_SIGN_ENUM __D, const int __R) {
  return (__m128)__builtin_ia32_getmantss_round((__v4sf)__A, (__v4sf)__B,
                                                (__D << 2) | __C, __R);
}

__funline __m128 _mm_mask_getmant_round_ss(__m128 __W, __mmask8 __U, __m128 __A,
                                         __m128 __B, _MM_MANTISSA_NORM_ENUM __C,
                                         _MM_MANTISSA_SIGN_ENUM __D,
                                         const int __R) {
  return (__m128)__builtin_ia32_getmantss_mask_round(
      (__v4sf)__A, (__v4sf)__B, (__D << 2) | __C, (__v4sf)__W, __U, __R);
}

__funline __m128 _mm_maskz_getmant_round_ss(__mmask8 __U, __m128 __A, __m128 __B,
                                          _MM_MANTISSA_NORM_ENUM __C,
                                          _MM_MANTISSA_SIGN_ENUM __D,
                                          const int __R) {
  return (__m128)__builtin_ia32_getmantss_mask_round(
      (__v4sf)__A, (__v4sf)__B, (__D << 2) | __C, (__v4sf)_mm_setzero_ps(),
      __U, __R);
}

#else
#define _mm512_getmant_round_pd(X, B, C, R) \
  ((__m512d)__builtin_ia32_getmantpd512_mask( \
      (__v8df)(__m512d)(X), (int)(((C) << 2) | (B)), \
      (__v8df)(__m512d)_mm512_undefined_pd(), (__mmask8)-1, (R)))
#define _mm512_mask_getmant_round_pd(W, U, X, B, C, R) \
  ((__m512d)__builtin_ia32_getmantpd512_mask( \
      (__v8df)(__m512d)(X), (int)(((C) << 2) | (B)), (__v8df)(__m512d)(W), \
      (__mmask8)(U), (R)))
#define _mm512_maskz_getmant_round_pd(U, X, B, C, R) \
  ((__m512d)__builtin_ia32_getmantpd512_mask( \
      (__v8df)(__m512d)(X), (int)(((C) << 2) | (B)), \
      (__v8df)(__m512d)_mm512_setzero_pd(), (__mmask8)(U), (R)))
#define _mm512_getmant_round_ps(X, B, C, R) \
  ((__m512)__builtin_ia32_getmantps512_mask( \
      (__v16sf)(__m512)(X), (int)(((C) << 2) | (B)), \
      (__v16sf)(__m512)_mm512_undefined_ps(), (__mmask16)-1, (R)))
#define _mm512_mask_getmant_round_ps(W, U, X, B, C, R) \
  ((__m512)__builtin_ia32_getmantps512_mask( \
      (__v16sf)(__m512)(X), (int)(((C) << 2) | (B)), (__v16sf)(__m512)(W), \
      (__mmask16)(U), (R)))
#define _mm512_maskz_getmant_round_ps(U, X, B, C, R) \
  ((__m512)__builtin_ia32_getmantps512_mask( \
      (__v16sf)(__m512)(X), (int)(((C) << 2) | (B)), \
      (__v16sf)(__m512)_mm512_setzero_ps(), (__mmask16)(U), (R)))
#define _mm_getmant_round_sd(X, Y, C, D, R) \
  ((__m128d)__builtin_ia32_getmantsd_round((__v2df)(__m128d)(X), \
                                           (__v2df)(__m128d)(Y), \
                                           (int)(((D) << 2) | (C)), (R)))
#define _mm_mask_getmant_round_sd(W, U, X, Y, C, D, R) \
  ((__m128d)__builtin_ia32_getmantsd_mask_round( \
      (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (int)(((D) << 2) | (C)), \
      (__v2df)(__m128d)(W), (__mmask8)(U), (R)))
#define _mm_maskz_getmant_round_sd(U, X, Y, C, D, R) \
  ((__m128d)__builtin_ia32_getmantsd_mask_round( \
      (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (int)(((D) << 2) | (C)), \
      (__v2df)(__m128d)_mm_setzero_pd(), (__mmask8)(U), (R)))
#define _mm_getmant_round_ss(X, Y, C, D, R) \
  ((__m128)__builtin_ia32_getmantss_round( \
      (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (int)(((D) << 2) | (C)), (R)))
#define _mm_mask_getmant_round_ss(W, U, X, Y, C, D, R) \
  ((__m128)__builtin_ia32_getmantss_mask_round( \
      (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (int)(((D) << 2) | (C)), \
      (__v4sf)(__m128)(W), (__mmask8)(U), (R)))
#define _mm_maskz_getmant_round_ss(U, X, Y, C, D, R) \
  ((__m128)__builtin_ia32_getmantss_mask_round( \
      (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (int)(((D) << 2) | (C)), \
      (__v4sf)(__m128)_mm_setzero_ps(), (__mmask8)(U), (R)))
#define _mm_getexp_round_ss(A, B, R) \
  ((__m128)__builtin_ia32_getexpss128_round((__v4sf)(__m128)(A), \
                                            (__v4sf)(__m128)(B), R))
#define _mm_mask_getexp_round_ss(W, U, A, B, C) \
  (__m128) __builtin_ia32_getexpss_mask_round(A, B, W, U, C)
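/* Note (illustrative, not from the upstream header): the getmant
   immediate packs the sign control in bits [3:2] and the normalization
   interval in bits [1:0], which is why the expansions above compute
   ((D) << 2) | (C).  For instance
     _mm512_getmant_round_pd(x, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_zero,
                             _MM_FROUND_CUR_DIRECTION)
   rescales each mantissa into [1, 2) with the sign cleared.  */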
#define _mm_maskz_getexp_round_ss(U, A, B, C) \
  (__m128) \
      __builtin_ia32_getexpss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C)
#define _mm_getexp_round_sd(A, B, R) \
  ((__m128d)__builtin_ia32_getexpsd128_round((__v2df)(__m128d)(A), \
                                             (__v2df)(__m128d)(B), R))
#define _mm_mask_getexp_round_sd(W, U, A, B, C) \
  (__m128d) __builtin_ia32_getexpsd_mask_round(A, B, W, U, C)
#define _mm_maskz_getexp_round_sd(U, A, B, C) \
  (__m128d) \
      __builtin_ia32_getexpsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C)
#define _mm512_getexp_round_ps(A, R) \
  ((__m512)__builtin_ia32_getexpps512_mask( \
      (__v16sf)(__m512)(A), (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, R))
#define _mm512_mask_getexp_round_ps(W, U, A, R) \
  ((__m512)__builtin_ia32_getexpps512_mask( \
      (__v16sf)(__m512)(A), (__v16sf)(__m512)(W), (__mmask16)(U), R))
#define _mm512_maskz_getexp_round_ps(U, A, R) \
  ((__m512)__builtin_ia32_getexpps512_mask( \
      (__v16sf)(__m512)(A), (__v16sf)_mm512_setzero_ps(), (__mmask16)(U), R))
#define _mm512_getexp_round_pd(A, R) \
  ((__m512d)__builtin_ia32_getexppd512_mask( \
      (__v8df)(__m512d)(A), (__v8df)_mm512_undefined_pd(), (__mmask8)-1, R))
#define _mm512_mask_getexp_round_pd(W, U, A, R) \
  ((__m512d)__builtin_ia32_getexppd512_mask( \
      (__v8df)(__m512d)(A), (__v8df)(__m512d)(W), (__mmask8)(U), R))
#define _mm512_maskz_getexp_round_pd(U, A, R) \
  ((__m512d)__builtin_ia32_getexppd512_mask( \
      (__v8df)(__m512d)(A), (__v8df)_mm512_setzero_pd(), (__mmask8)(U), R))
#endif

#ifdef __OPTIMIZE__
__funline __m512 _mm512_roundscale_round_ps(__m512 __A, const int __imm,
                                          const int __R) {
  return (__m512)__builtin_ia32_rndscaleps_mask(
      (__v16sf)__A, __imm, (__v16sf)_mm512_undefined_ps(), -1, __R);
}

__funline __m512 _mm512_mask_roundscale_round_ps(__m512 __A, __mmask16 __B,
                                               __m512 __C, const int __imm,
                                               const int __R) {
  return (__m512)__builtin_ia32_rndscaleps_mask(
      (__v16sf)__C, __imm, (__v16sf)__A, (__mmask16)__B, __R);
}

__funline __m512 _mm512_maskz_roundscale_round_ps(__mmask16 __A, __m512 __B,
                                                const int __imm,
                                                const int __R) {
  return (__m512)__builtin_ia32_rndscaleps_mask(
      (__v16sf)__B, __imm, (__v16sf)_mm512_setzero_ps(), (__mmask16)__A, __R);
}

__funline __m512d _mm512_roundscale_round_pd(__m512d __A, const int __imm,
                                           const int __R) {
  return (__m512d)__builtin_ia32_rndscalepd_mask(
      (__v8df)__A, __imm, (__v8df)_mm512_undefined_pd(), -1, __R);
}

__funline __m512d _mm512_mask_roundscale_round_pd(__m512d __A, __mmask8 __B,
                                                __m512d __C, const int __imm,
                                                const int __R) {
  return (__m512d)__builtin_ia32_rndscalepd_mask(
      (__v8df)__C, __imm, (__v8df)__A, (__mmask8)__B, __R);
}

__funline __m512d _mm512_maskz_roundscale_round_pd(__mmask8 __A, __m512d __B,
                                                 const int __imm,
                                                 const int __R) {
  return (__m512d)__builtin_ia32_rndscalepd_mask(
      (__v8df)__B, __imm, (__v8df)_mm512_setzero_pd(), (__mmask8)__A, __R);
}

__funline __m128 _mm_roundscale_round_ss(__m128 __A, __m128 __B, const int __imm,
                                       const int __R) {
  return (__m128)__builtin_ia32_rndscaless_round((__v4sf)__A, (__v4sf)__B,
                                                 __imm, __R);
}

__funline __m128d _mm_roundscale_round_sd(__m128d __A, __m128d __B,
                                        const int __imm, const int __R) {
  return (__m128d)__builtin_ia32_rndscalesd_round((__v2df)__A, (__v2df)__B,
                                                  __imm, __R);
}

#else
#define _mm512_roundscale_round_ps(A, B, R) \
  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
                                          (__v16sf)_mm512_undefined_ps(), \
                                          (__mmask16)(-1), R))
#define _mm512_mask_roundscale_round_ps(A, B, C, D, R) \
  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(D), \
                                          (__v16sf)(__m512)(A), \
                                          (__mmask16)(B), R))
#define _mm512_maskz_roundscale_round_ps(A, B, C, R) \
  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(C), \
                                          (__v16sf)_mm512_setzero_ps(), \
                                          (__mmask16)(A), R))
#define _mm512_roundscale_round_pd(A, B, R) \
  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
                                           (__v8df)_mm512_undefined_pd(), \
                                           (__mmask8)(-1), R))
#define _mm512_mask_roundscale_round_pd(A, B, C, D, R) \
  ((__m512d)__builtin_ia32_rndscalepd_mask( \
      (__v8df)(__m512d)(C), (int)(D), (__v8df)(__m512d)(A), (__mmask8)(B), R))
#define _mm512_maskz_roundscale_round_pd(A, B, C, R) \
  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(C), \
                                           (__v8df)_mm512_setzero_pd(), \
                                           (__mmask8)(A), R))
#define _mm_roundscale_round_ss(A, B, C, R) \
  ((__m128)__builtin_ia32_rndscaless_round((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), (int)(C), R))
#define _mm_roundscale_round_sd(A, B, C, R) \
  ((__m128d)__builtin_ia32_rndscalesd_round( \
      (__v2df)(__m128d)(A), (__v2df)(__m128d)(B), (int)(C), R))
#endif

__funline __m512 _mm512_floor_ps(__m512 __A) {
  return (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)__A, _MM_FROUND_FLOOR,
                                                (__v16sf)__A, -1,
                                                _MM_FROUND_CUR_DIRECTION);
}

__funline __m512d _mm512_floor_pd(__m512d __A) {
  return (__m512d)__builtin_ia32_rndscalepd_mask(
      (__v8df)__A, _MM_FROUND_FLOOR, (__v8df)__A, -1, _MM_FROUND_CUR_DIRECTION);
}

__funline __m512 _mm512_ceil_ps(__m512 __A) {
  return (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)__A, _MM_FROUND_CEIL,
                                                (__v16sf)__A, -1,
                                                _MM_FROUND_CUR_DIRECTION);
}

__funline __m512d _mm512_ceil_pd(__m512d __A) {
  return (__m512d)__builtin_ia32_rndscalepd_mask(
      (__v8df)__A, _MM_FROUND_CEIL, (__v8df)__A, -1, _MM_FROUND_CUR_DIRECTION);
}

__funline __m512 _mm512_mask_floor_ps(__m512 __W, __mmask16 __U, __m512 __A) {
  return (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)__A, _MM_FROUND_FLOOR,
                                                (__v16sf)__W, __U,
                                                _MM_FROUND_CUR_DIRECTION);
}

__funline __m512d _mm512_mask_floor_pd(__m512d __W, __mmask8 __U, __m512d __A) {
  return (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)__A, _MM_FROUND_FLOOR,
                                                 (__v8df)__W, __U,
                                                 _MM_FROUND_CUR_DIRECTION);
}

__funline __m512 _mm512_mask_ceil_ps(__m512 __W, __mmask16 __U, __m512 __A) {
  return (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)__A, _MM_FROUND_CEIL,
                                                (__v16sf)__W, __U,
                                                _MM_FROUND_CUR_DIRECTION);
}

__funline __m512d _mm512_mask_ceil_pd(__m512d __W, __mmask8 __U, __m512d __A) {
  return (__m512d)__builtin_ia32_rndscalepd_mask(
      (__v8df)__A, _MM_FROUND_CEIL, (__v8df)__W, __U, _MM_FROUND_CUR_DIRECTION);
}

#ifdef __OPTIMIZE__
__funline __m512i _mm512_alignr_epi32(__m512i __A, __m512i __B,
                                    const int __imm) {
  return (__m512i)__builtin_ia32_alignd512_mask(
      (__v16si)__A, (__v16si)__B, __imm, (__v16si)_mm512_undefined_epi32(),
      (__mmask16)-1);
}

__funline __m512i _mm512_mask_alignr_epi32(__m512i __W, __mmask16 __U,
                                         __m512i __A, __m512i __B,
                                         const int __imm) {
  return (__m512i)__builtin_ia32_alignd512_mask(
      (__v16si)__A, (__v16si)__B, __imm, (__v16si)__W, (__mmask16)__U);
}

__funline __m512i _mm512_maskz_alignr_epi32(__mmask16 __U, __m512i __A,
                                          __m512i __B, const int __imm) {
  return (__m512i)__builtin_ia32_alignd512_mask(
      (__v16si)__A, (__v16si)__B, __imm, (__v16si)_mm512_setzero_si512(),
      (__mmask16)__U);
}

__funline __m512i _mm512_alignr_epi64(__m512i __A, __m512i __B,
                                    const int __imm) {
  return (__m512i)__builtin_ia32_alignq512_mask(
      (__v8di)__A, (__v8di)__B, __imm, (__v8di)_mm512_undefined_epi32(),
      (__mmask8)-1);
}
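/* Note (illustrative, not from the upstream header): valignd/valignq
   concatenate the two sources with __A as the most-significant half and
   shift right by __imm elements, so with hypothetical vectors a and b:
     _mm512_alignr_epi32(a, b, 1)
   yields { b[1], ..., b[15], a[0] }.  */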
__funline __m512i _mm512_mask_alignr_epi64(__m512i __W, __mmask8 __U,
                                         __m512i __A, __m512i __B,
                                         const int __imm) {
  return (__m512i)__builtin_ia32_alignq512_mask((__v8di)__A, (__v8di)__B,
                                                __imm, (__v8di)__W,
                                                (__mmask8)__U);
}

__funline __m512i _mm512_maskz_alignr_epi64(__mmask8 __U, __m512i __A,
                                          __m512i __B, const int __imm) {
  return (__m512i)__builtin_ia32_alignq512_mask((__v8di)__A, (__v8di)__B,
                                                __imm,
                                                (__v8di)_mm512_setzero_si512(),
                                                (__mmask8)__U);
}

#else
#define _mm512_alignr_epi32(X, Y, C) \
  ((__m512i)__builtin_ia32_alignd512_mask( \
      (__v16si)(__m512i)(X), (__v16si)(__m512i)(Y), (int)(C), \
      (__v16si)_mm512_undefined_epi32(), (__mmask16)-1))
#define _mm512_mask_alignr_epi32(W, U, X, Y, C) \
  ((__m512i)__builtin_ia32_alignd512_mask( \
      (__v16si)(__m512i)(X), (__v16si)(__m512i)(Y), (int)(C), \
      (__v16si)(__m512i)(W), (__mmask16)(U)))
#define _mm512_maskz_alignr_epi32(U, X, Y, C) \
  ((__m512i)__builtin_ia32_alignd512_mask( \
      (__v16si)(__m512i)(X), (__v16si)(__m512i)(Y), (int)(C), \
      (__v16si)_mm512_setzero_si512(), (__mmask16)(U)))
#define _mm512_alignr_epi64(X, Y, C) \
  ((__m512i)__builtin_ia32_alignq512_mask( \
      (__v8di)(__m512i)(X), (__v8di)(__m512i)(Y), (int)(C), \
      (__v8di)_mm512_undefined_epi32(), (__mmask8)-1))
#define _mm512_mask_alignr_epi64(W, U, X, Y, C) \
  ((__m512i)__builtin_ia32_alignq512_mask( \
      (__v8di)(__m512i)(X), (__v8di)(__m512i)(Y), (int)(C), \
      (__v8di)(__m512i)(W), (__mmask8)(U)))
#define _mm512_maskz_alignr_epi64(U, X, Y, C) \
  ((__m512i)__builtin_ia32_alignq512_mask( \
      (__v8di)(__m512i)(X), (__v8di)(__m512i)(Y), (int)(C), \
      (__v8di)_mm512_setzero_si512(), (__mmask8)(U)))
#endif

__funline __mmask16 _mm512_cmpeq_epi32_mask(__m512i __A, __m512i __B) {
  return (__mmask16)__builtin_ia32_pcmpeqd512_mask((__v16si)__A, (__v16si)__B,
                                                   (__mmask16)-1);
}

__funline __mmask16 _mm512_mask_cmpeq_epi32_mask(__mmask16 __U, __m512i __A,
                                               __m512i __B) {
  return (__mmask16)__builtin_ia32_pcmpeqd512_mask((__v16si)__A, (__v16si)__B,
                                                   __U);
}

__funline __mmask8 _mm512_mask_cmpeq_epi64_mask(__mmask8 __U, __m512i __A,
                                              __m512i __B) {
  return (__mmask8)__builtin_ia32_pcmpeqq512_mask((__v8di)__A, (__v8di)__B,
                                                  __U);
}

__funline __mmask8 _mm512_cmpeq_epi64_mask(__m512i __A, __m512i __B) {
  return (__mmask8)__builtin_ia32_pcmpeqq512_mask((__v8di)__A, (__v8di)__B,
                                                  (__mmask8)-1);
}

__funline __mmask16 _mm512_cmpgt_epi32_mask(__m512i __A, __m512i __B) {
  return (__mmask16)__builtin_ia32_pcmpgtd512_mask((__v16si)__A, (__v16si)__B,
                                                   (__mmask16)-1);
}

__funline __mmask16 _mm512_mask_cmpgt_epi32_mask(__mmask16 __U, __m512i __A,
                                               __m512i __B) {
  return (__mmask16)__builtin_ia32_pcmpgtd512_mask((__v16si)__A, (__v16si)__B,
                                                   __U);
}

__funline __mmask8 _mm512_mask_cmpgt_epi64_mask(__mmask8 __U, __m512i __A,
                                              __m512i __B) {
  return (__mmask8)__builtin_ia32_pcmpgtq512_mask((__v8di)__A, (__v8di)__B,
                                                  __U);
}

__funline __mmask8 _mm512_cmpgt_epi64_mask(__m512i __A, __m512i __B) {
  return (__mmask8)__builtin_ia32_pcmpgtq512_mask((__v8di)__A, (__v8di)__B,
                                                  (__mmask8)-1);
}

__funline __mmask16 _mm512_cmpge_epi32_mask(__m512i __X, __m512i __Y) {
  return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__X, (__v16si)__Y, 5,
                                                (__mmask16)-1);
}

__funline __mmask16 _mm512_mask_cmpge_epi32_mask(__mmask16 __M, __m512i __X,
                                               __m512i __Y) {
  return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__X, (__v16si)__Y, 5,
                                                (__mmask16)__M);
}

__funline __mmask16 _mm512_mask_cmpge_epu32_mask(__mmask16 __M, __m512i __X,
                                               __m512i __Y) {
  return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__X, (__v16si)__Y, 5,
                                                 (__mmask16)__M);
}
__funline __mmask16 _mm512_cmpge_epu32_mask(__m512i __X, __m512i __Y) {
  return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__X, (__v16si)__Y, 5,
                                                 (__mmask16)-1);
}

__funline __mmask8 _mm512_mask_cmpge_epi64_mask(__mmask8 __M, __m512i __X,
                                              __m512i __Y) {
  return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__X, (__v8di)__Y, 5,
                                               (__mmask8)__M);
}

__funline __mmask8 _mm512_cmpge_epi64_mask(__m512i __X, __m512i __Y) {
  return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__X, (__v8di)__Y, 5,
                                               (__mmask8)-1);
}

__funline __mmask8 _mm512_mask_cmpge_epu64_mask(__mmask8 __M, __m512i __X,
                                              __m512i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__X, (__v8di)__Y, 5,
                                                (__mmask8)__M);
}

__funline __mmask8 _mm512_cmpge_epu64_mask(__m512i __X, __m512i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__X, (__v8di)__Y, 5,
                                                (__mmask8)-1);
}

__funline __mmask16 _mm512_mask_cmple_epi32_mask(__mmask16 __M, __m512i __X,
                                               __m512i __Y) {
  return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__X, (__v16si)__Y, 2,
                                                (__mmask16)__M);
}

__funline __mmask16 _mm512_cmple_epi32_mask(__m512i __X, __m512i __Y) {
  return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__X, (__v16si)__Y, 2,
                                                (__mmask16)-1);
}

__funline __mmask16 _mm512_mask_cmple_epu32_mask(__mmask16 __M, __m512i __X,
                                               __m512i __Y) {
  return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__X, (__v16si)__Y, 2,
                                                 (__mmask16)__M);
}

__funline __mmask16 _mm512_cmple_epu32_mask(__m512i __X, __m512i __Y) {
  return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__X, (__v16si)__Y, 2,
                                                 (__mmask16)-1);
}

__funline __mmask8 _mm512_mask_cmple_epi64_mask(__mmask8 __M, __m512i __X,
                                              __m512i __Y) {
  return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__X, (__v8di)__Y, 2,
                                               (__mmask8)__M);
}

__funline __mmask8 _mm512_cmple_epi64_mask(__m512i __X, __m512i __Y) {
  return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__X, (__v8di)__Y, 2,
                                               (__mmask8)-1);
}

__funline __mmask8 _mm512_mask_cmple_epu64_mask(__mmask8 __M, __m512i __X,
                                              __m512i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__X, (__v8di)__Y, 2,
                                                (__mmask8)__M);
}

__funline __mmask8 _mm512_cmple_epu64_mask(__m512i __X, __m512i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__X, (__v8di)__Y, 2,
                                                (__mmask8)-1);
}

__funline __mmask16 _mm512_mask_cmplt_epi32_mask(__mmask16 __M, __m512i __X,
                                               __m512i __Y) {
  return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__X, (__v16si)__Y, 1,
                                                (__mmask16)__M);
}

__funline __mmask16 _mm512_cmplt_epi32_mask(__m512i __X, __m512i __Y) {
  return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__X, (__v16si)__Y, 1,
                                                (__mmask16)-1);
}

__funline __mmask16 _mm512_mask_cmplt_epu32_mask(__mmask16 __M, __m512i __X,
                                               __m512i __Y) {
  return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__X, (__v16si)__Y, 1,
                                                 (__mmask16)__M);
}

__funline __mmask16 _mm512_cmplt_epu32_mask(__m512i __X, __m512i __Y) {
  return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__X, (__v16si)__Y, 1,
                                                 (__mmask16)-1);
}

__funline __mmask8 _mm512_mask_cmplt_epi64_mask(__mmask8 __M, __m512i __X,
                                              __m512i __Y) {
  return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__X, (__v8di)__Y, 1,
                                               (__mmask8)__M);
}

__funline __mmask8 _mm512_cmplt_epi64_mask(__m512i __X, __m512i __Y) {
  return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__X, (__v8di)__Y, 1,
                                               (__mmask8)-1);
}

__funline __mmask8 _mm512_mask_cmplt_epu64_mask(__mmask8 __M, __m512i __X,
                                              __m512i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__X, (__v8di)__Y, 1,
                                                (__mmask8)__M);
}

__funline __mmask8 _mm512_cmplt_epu64_mask(__m512i __X, __m512i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__X, (__v8di)__Y, 1,
                                                (__mmask8)-1);
}
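/* Usage sketch (illustrative, not from the upstream header): these
   compares return a bitmask rather than a vector, so the result can be
   consumed by mask intrinsics or ordinary integer code, e.g.:
     __mmask16 m = _mm512_cmplt_epi32_mask(x, y);
     int n = __builtin_popcount((unsigned)m);  /* lanes where x < y */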
__funline __mmask16 _mm512_cmpneq_epi32_mask(__m512i __X, __m512i __Y) {
  return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__X, (__v16si)__Y, 4,
                                                (__mmask16)-1);
}

__funline __mmask16 _mm512_mask_cmpneq_epi32_mask(__mmask16 __M, __m512i __X,
                                                __m512i __Y) {
  return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__X, (__v16si)__Y, 4,
                                                (__mmask16)__M);
}

__funline __mmask16 _mm512_mask_cmpneq_epu32_mask(__mmask16 __M, __m512i __X,
                                                __m512i __Y) {
  return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__X, (__v16si)__Y, 4,
                                                 (__mmask16)__M);
}

__funline __mmask16 _mm512_cmpneq_epu32_mask(__m512i __X, __m512i __Y) {
  return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__X, (__v16si)__Y, 4,
                                                 (__mmask16)-1);
}

__funline __mmask8 _mm512_mask_cmpneq_epi64_mask(__mmask8 __M, __m512i __X,
                                               __m512i __Y) {
  return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__X, (__v8di)__Y, 4,
                                               (__mmask8)__M);
}

__funline __mmask8 _mm512_cmpneq_epi64_mask(__m512i __X, __m512i __Y) {
  return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__X, (__v8di)__Y, 4,
                                               (__mmask8)-1);
}

__funline __mmask8 _mm512_mask_cmpneq_epu64_mask(__mmask8 __M, __m512i __X,
                                               __m512i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__X, (__v8di)__Y, 4,
                                                (__mmask8)__M);
}

__funline __mmask8 _mm512_cmpneq_epu64_mask(__m512i __X, __m512i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__X, (__v8di)__Y, 4,
                                                (__mmask8)-1);
}

#define _MM_CMPINT_EQ     0x0
#define _MM_CMPINT_LT     0x1
#define _MM_CMPINT_LE     0x2
#define _MM_CMPINT_UNUSED 0x3
#define _MM_CMPINT_NE     0x4
#define _MM_CMPINT_NLT    0x5
#define _MM_CMPINT_GE     0x5
#define _MM_CMPINT_NLE    0x6
#define _MM_CMPINT_GT     0x6

#ifdef __OPTIMIZE__
__funline __mmask16 _kshiftli_mask16(__mmask16 __A, unsigned int __B) {
  return (__mmask16)__builtin_ia32_kshiftlihi((__mmask16)__A, (__mmask8)__B);
}

__funline __mmask16 _kshiftri_mask16(__mmask16 __A, unsigned int __B) {
  return (__mmask16)__builtin_ia32_kshiftrihi((__mmask16)__A, (__mmask8)__B);
}

__funline __mmask8 _mm512_cmp_epi64_mask(__m512i __X, __m512i __Y,
                                       const int __P) {
  return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__X, (__v8di)__Y, __P,
                                               (__mmask8)-1);
}

__funline __mmask16 _mm512_cmp_epi32_mask(__m512i __X, __m512i __Y,
                                        const int __P) {
  return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__X, (__v16si)__Y,
                                                __P, (__mmask16)-1);
}

__funline __mmask8 _mm512_cmp_epu64_mask(__m512i __X, __m512i __Y,
                                       const int __P) {
  return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__X, (__v8di)__Y, __P,
                                                (__mmask8)-1);
}

__funline __mmask16 _mm512_cmp_epu32_mask(__m512i __X, __m512i __Y,
                                        const int __P) {
  return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__X, (__v16si)__Y,
                                                 __P, (__mmask16)-1);
}

__funline __mmask8 _mm512_cmp_round_pd_mask(__m512d __X, __m512d __Y,
                                          const int __P, const int __R) {
  return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, __P,
                                                (__mmask8)-1, __R);
}

__funline __mmask16 _mm512_cmp_round_ps_mask(__m512 __X, __m512 __Y,
                                           const int __P, const int __R) {
  return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y,
                                                 __P, (__mmask16)-1, __R);
}

__funline __mmask8 _mm512_mask_cmp_epi64_mask(__mmask8 __U, __m512i __X,
                                            __m512i __Y, const int __P) {
  return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__X, (__v8di)__Y, __P,
                                               (__mmask8)__U);
}

__funline __mmask16 _mm512_mask_cmp_epi32_mask(__mmask16 __U, __m512i __X,
                                             __m512i __Y, const int __P) {
  return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__X, (__v16si)__Y,
                                                __P, (__mmask16)__U);
}
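/* Note (illustrative, not from the upstream header): the generic
   predicate form subsumes the named comparisons above; for example,
     _mm512_cmp_epi32_mask(x, y, _MM_CMPINT_LE)
   computes the same mask as _mm512_cmple_epi32_mask(x, y).  */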
__funline __mmask8 _mm512_mask_cmp_epu64_mask(__mmask8 __U, __m512i __X,
                                            __m512i __Y, const int __P) {
  return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__X, (__v8di)__Y, __P,
                                                (__mmask8)__U);
}

__funline __mmask16 _mm512_mask_cmp_epu32_mask(__mmask16 __U, __m512i __X,
                                             __m512i __Y, const int __P) {
  return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__X, (__v16si)__Y,
                                                 __P, (__mmask16)__U);
}

__funline __mmask8 _mm512_mask_cmp_round_pd_mask(__mmask8 __U, __m512d __X,
                                               __m512d __Y, const int __P,
                                               const int __R) {
  return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, __P,
                                                (__mmask8)__U, __R);
}

__funline __mmask16 _mm512_mask_cmp_round_ps_mask(__mmask16 __U, __m512 __X,
                                                __m512 __Y, const int __P,
                                                const int __R) {
  return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y,
                                                 __P, (__mmask16)__U, __R);
}

__funline __mmask8 _mm_cmp_round_sd_mask(__m128d __X, __m128d __Y, const int __P,
                                       const int __R) {
  return (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)__X, (__v2df)__Y, __P,
                                             (__mmask8)-1, __R);
}

__funline __mmask8 _mm_mask_cmp_round_sd_mask(__mmask8 __M, __m128d __X,
                                            __m128d __Y, const int __P,
                                            const int __R) {
  return (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)__X, (__v2df)__Y, __P,
                                             (__mmask8)__M, __R);
}

__funline __mmask8 _mm_cmp_round_ss_mask(__m128 __X, __m128 __Y, const int __P,
                                       const int __R) {
  return (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)__X, (__v4sf)__Y, __P,
                                             (__mmask8)-1, __R);
}

__funline __mmask8 _mm_mask_cmp_round_ss_mask(__mmask8 __M, __m128 __X,
                                            __m128 __Y, const int __P,
                                            const int __R) {
  return (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)__X, (__v4sf)__Y, __P,
                                             (__mmask8)__M, __R);
}

#else
#define _kshiftli_mask16(X, Y) \
  ((__mmask16)__builtin_ia32_kshiftlihi((__mmask16)(X), (__mmask8)(Y)))
#define _kshiftri_mask16(X, Y) \
  ((__mmask16)__builtin_ia32_kshiftrihi((__mmask16)(X), (__mmask8)(Y)))
#define _mm512_cmp_epi64_mask(X, Y, P) \
  ((__mmask8)__builtin_ia32_cmpq512_mask( \
      (__v8di)(__m512i)(X), (__v8di)(__m512i)(Y), (int)(P), (__mmask8)-1))
#define _mm512_cmp_epi32_mask(X, Y, P) \
  ((__mmask16)__builtin_ia32_cmpd512_mask( \
      (__v16si)(__m512i)(X), (__v16si)(__m512i)(Y), (int)(P), (__mmask16)-1))
#define _mm512_cmp_epu64_mask(X, Y, P) \
  ((__mmask8)__builtin_ia32_ucmpq512_mask( \
      (__v8di)(__m512i)(X), (__v8di)(__m512i)(Y), (int)(P), (__mmask8)-1))
#define _mm512_cmp_epu32_mask(X, Y, P) \
  ((__mmask16)__builtin_ia32_ucmpd512_mask( \
      (__v16si)(__m512i)(X), (__v16si)(__m512i)(Y), (int)(P), (__mmask16)-1))
#define _mm512_cmp_round_pd_mask(X, Y, P, R) \
  ((__mmask8)__builtin_ia32_cmppd512_mask( \
      (__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (int)(P), (__mmask8)-1, R))
#define _mm512_cmp_round_ps_mask(X, Y, P, R) \
  ((__mmask16)__builtin_ia32_cmpps512_mask( \
      (__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (int)(P), (__mmask16)-1, R))
#define _mm512_mask_cmp_epi64_mask(M, X, Y, P) \
  ((__mmask8)__builtin_ia32_cmpq512_mask( \
      (__v8di)(__m512i)(X), (__v8di)(__m512i)(Y), (int)(P), (__mmask8)M))
#define _mm512_mask_cmp_epi32_mask(M, X, Y, P) \
  ((__mmask16)__builtin_ia32_cmpd512_mask( \
      (__v16si)(__m512i)(X), (__v16si)(__m512i)(Y), (int)(P), (__mmask16)M))
#define _mm512_mask_cmp_epu64_mask(M, X, Y, P) \
  ((__mmask8)__builtin_ia32_ucmpq512_mask( \
      (__v8di)(__m512i)(X), (__v8di)(__m512i)(Y), (int)(P), (__mmask8)M))
#define _mm512_mask_cmp_epu32_mask(M, X, Y, P) \
  ((__mmask16)__builtin_ia32_ucmpd512_mask( \
      (__v16si)(__m512i)(X), (__v16si)(__m512i)(Y), (int)(P), (__mmask16)M))
#define _mm512_mask_cmp_round_pd_mask(M, X, Y, P, R) \
  ((__mmask8)__builtin_ia32_cmppd512_mask( \
      (__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (int)(P), (__mmask8)M, R))
#define _mm512_mask_cmp_round_ps_mask(M, X, Y, P, R) \
  ((__mmask16)__builtin_ia32_cmpps512_mask( \
      (__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (int)(P), (__mmask16)M, R))
#define _mm_cmp_round_sd_mask(X, Y, P, R) \
  ((__mmask8)__builtin_ia32_cmpsd_mask( \
      (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (int)(P), (__mmask8)-1, R))
#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) \
  ((__mmask8)__builtin_ia32_cmpsd_mask( \
      (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (int)(P), (M), R))
#define _mm_cmp_round_ss_mask(X, Y, P, R) \
  ((__mmask8)__builtin_ia32_cmpss_mask( \
      (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (int)(P), (__mmask8)-1, R))
#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) \
  ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
                                       (__v4sf)(__m128)(Y), (int)(P), (M), R))
#endif

#ifdef __OPTIMIZE__
__funline __m512 _mm512_i32gather_ps(__m512i __index, void const *__addr,
                                   int __scale) {
  __m512 __v1_old = _mm512_undefined_ps();
  __mmask16 __mask = 0xFFFF;
  return (__m512)__builtin_ia32_gathersiv16sf(
      (__v16sf)__v1_old, __addr, (__v16si)__index, __mask, __scale);
}

__funline __m512 _mm512_mask_i32gather_ps(__m512 __v1_old, __mmask16 __mask,
                                        __m512i __index, void const *__addr,
                                        int __scale) {
  return (__m512)__builtin_ia32_gathersiv16sf(
      (__v16sf)__v1_old, __addr, (__v16si)__index, __mask, __scale);
}

__funline __m512d _mm512_i32gather_pd(__m256i __index, void const *__addr,
                                    int __scale) {
  __m512d __v1_old = _mm512_undefined_pd();
  __mmask8 __mask = 0xFF;
  return (__m512d)__builtin_ia32_gathersiv8df((__v8df)__v1_old, __addr,
                                              (__v8si)__index, __mask,
                                              __scale);
}

__funline __m512d _mm512_mask_i32gather_pd(__m512d __v1_old, __mmask8 __mask,
                                         __m256i __index, void const *__addr,
                                         int __scale) {
  return (__m512d)__builtin_ia32_gathersiv8df((__v8df)__v1_old, __addr,
                                              (__v8si)__index, __mask,
                                              __scale);
}

__funline __m256 _mm512_i64gather_ps(__m512i __index, void const *__addr,
                                   int __scale) {
  __m256 __v1_old = _mm256_undefined_ps();
  __mmask8 __mask = 0xFF;
  return (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)__v1_old, __addr,
                                              (__v8di)__index, __mask,
                                              __scale);
}

__funline __m256 _mm512_mask_i64gather_ps(__m256 __v1_old, __mmask8 __mask,
                                        __m512i __index, void const *__addr,
                                        int __scale) {
  return (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)__v1_old, __addr,
                                              (__v8di)__index, __mask,
                                              __scale);
}

__funline __m512d _mm512_i64gather_pd(__m512i __index, void const *__addr,
                                    int __scale) {
  __m512d __v1_old = _mm512_undefined_pd();
  __mmask8 __mask = 0xFF;
  return (__m512d)__builtin_ia32_gatherdiv8df((__v8df)__v1_old, __addr,
                                              (__v8di)__index, __mask,
                                              __scale);
}

__funline __m512d _mm512_mask_i64gather_pd(__m512d __v1_old, __mmask8 __mask,
                                         __m512i __index, void const *__addr,
                                         int __scale) {
  return (__m512d)__builtin_ia32_gatherdiv8df((__v8df)__v1_old, __addr,
                                              (__v8di)__index, __mask,
                                              __scale);
}

__funline __m512i _mm512_i32gather_epi32(__m512i __index, void const *__addr,
                                       int __scale) {
  __m512i __v1_old = _mm512_undefined_epi32();
  __mmask16 __mask = 0xFFFF;
  return (__m512i)__builtin_ia32_gathersiv16si(
      (__v16si)__v1_old, __addr, (__v16si)__index, __mask, __scale);
}

__funline __m512i _mm512_mask_i32gather_epi32(__m512i __v1_old, __mmask16 __mask,
                                            __m512i __index,
                                            void const *__addr, int __scale) {
  return (__m512i)__builtin_ia32_gathersiv16si(
      (__v16si)__v1_old, __addr, (__v16si)__index, __mask, __scale);
}
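/* Usage sketch (illustrative; tab and idx are hypothetical): the scale
   is a byte multiplier applied to each index, so gathering sixteen
   floats from a float table uses scale 4:
     __m512 v = _mm512_i32gather_ps(idx, tab, 4);
   The masked form merges __v1_old into the result wherever the
   corresponding mask bit is clear.  */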
__funline __m512i _mm512_i32gather_epi64(__m256i __index, void const *__addr,
                                       int __scale) {
  __m512i __v1_old = _mm512_undefined_epi32();
  __mmask8 __mask = 0xFF;
  return (__m512i)__builtin_ia32_gathersiv8di((__v8di)__v1_old, __addr,
                                              (__v8si)__index, __mask,
                                              __scale);
}

__funline __m512i _mm512_mask_i32gather_epi64(__m512i __v1_old, __mmask8 __mask,
                                            __m256i __index,
                                            void const *__addr, int __scale) {
  return (__m512i)__builtin_ia32_gathersiv8di((__v8di)__v1_old, __addr,
                                              (__v8si)__index, __mask,
                                              __scale);
}

__funline __m256i _mm512_i64gather_epi32(__m512i __index, void const *__addr,
                                       int __scale) {
  __m256i __v1_old = _mm256_undefined_si256();
  __mmask8 __mask = 0xFF;
  return (__m256i)__builtin_ia32_gatherdiv16si(
      (__v8si)__v1_old, __addr, (__v8di)__index, __mask, __scale);
}

__funline __m256i _mm512_mask_i64gather_epi32(__m256i __v1_old, __mmask8 __mask,
                                            __m512i __index,
                                            void const *__addr, int __scale) {
  return (__m256i)__builtin_ia32_gatherdiv16si(
      (__v8si)__v1_old, __addr, (__v8di)__index, __mask, __scale);
}

__funline __m512i _mm512_i64gather_epi64(__m512i __index, void const *__addr,
                                       int __scale) {
  __m512i __v1_old = _mm512_undefined_epi32();
  __mmask8 __mask = 0xFF;
  return (__m512i)__builtin_ia32_gatherdiv8di((__v8di)__v1_old, __addr,
                                              (__v8di)__index, __mask,
                                              __scale);
}

__funline __m512i _mm512_mask_i64gather_epi64(__m512i __v1_old, __mmask8 __mask,
                                            __m512i __index,
                                            void const *__addr, int __scale) {
  return (__m512i)__builtin_ia32_gatherdiv8di((__v8di)__v1_old, __addr,
                                              (__v8di)__index, __mask,
                                              __scale);
}

__funline void _mm512_i32scatter_ps(void *__addr, __m512i __index, __m512 __v1,
                                  int __scale) {
  __builtin_ia32_scattersiv16sf(__addr, (__mmask16)0xFFFF, (__v16si)__index,
                                (__v16sf)__v1, __scale);
}

__funline void _mm512_mask_i32scatter_ps(void *__addr, __mmask16 __mask,
                                       __m512i __index, __m512 __v1,
                                       int __scale) {
  __builtin_ia32_scattersiv16sf(__addr, __mask, (__v16si)__index,
                                (__v16sf)__v1, __scale);
}

__funline void _mm512_i32scatter_pd(void *__addr, __m256i __index, __m512d __v1,
                                  int __scale) {
  __builtin_ia32_scattersiv8df(__addr, (__mmask8)0xFF, (__v8si)__index,
                               (__v8df)__v1, __scale);
}

__funline void _mm512_mask_i32scatter_pd(void *__addr, __mmask8 __mask,
                                       __m256i __index, __m512d __v1,
                                       int __scale) {
  __builtin_ia32_scattersiv8df(__addr, __mask, (__v8si)__index, (__v8df)__v1,
                               __scale);
}

__funline void _mm512_i64scatter_ps(void *__addr, __m512i __index, __m256 __v1,
                                  int __scale) {
  __builtin_ia32_scatterdiv16sf(__addr, (__mmask8)0xFF, (__v8di)__index,
                                (__v8sf)__v1, __scale);
}

__funline void _mm512_mask_i64scatter_ps(void *__addr, __mmask8 __mask,
                                       __m512i __index, __m256 __v1,
                                       int __scale) {
  __builtin_ia32_scatterdiv16sf(__addr, __mask, (__v8di)__index, (__v8sf)__v1,
                                __scale);
}

__funline void _mm512_i64scatter_pd(void *__addr, __m512i __index, __m512d __v1,
                                  int __scale) {
  __builtin_ia32_scatterdiv8df(__addr, (__mmask8)0xFF, (__v8di)__index,
                               (__v8df)__v1, __scale);
}

__funline void _mm512_mask_i64scatter_pd(void *__addr, __mmask8 __mask,
                                       __m512i __index, __m512d __v1,
                                       int __scale) {
  __builtin_ia32_scatterdiv8df(__addr, __mask, (__v8di)__index, (__v8df)__v1,
                               __scale);
}

__funline void _mm512_i32scatter_epi32(void *__addr, __m512i __index,
                                     __m512i __v1, int __scale) {
  __builtin_ia32_scattersiv16si(__addr, (__mmask16)0xFFFF, (__v16si)__index,
                                (__v16si)__v1, __scale);
}

__funline void _mm512_mask_i32scatter_epi32(void *__addr, __mmask16 __mask,
                                          __m512i __index, __m512i __v1,
                                          int __scale) {
  __builtin_ia32_scattersiv16si(__addr, __mask, (__v16si)__index,
                                (__v16si)__v1, __scale);
}
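/* Usage sketch (illustrative; tab, idx, and v are hypothetical):
   scatters are the store-side dual of the gathers above:
     _mm512_i32scatter_ps(tab, idx, v, 4);
   When two active indices collide, the element from the higher lane
   wins, matching the architectural scatter write ordering.  */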
__funline void _mm512_i32scatter_epi64(void *__addr, __m256i __index,
                                     __m512i __v1, int __scale) {
  __builtin_ia32_scattersiv8di(__addr, (__mmask8)0xFF, (__v8si)__index,
                               (__v8di)__v1, __scale);
}

__funline void _mm512_mask_i32scatter_epi64(void *__addr, __mmask8 __mask,
                                          __m256i __index, __m512i __v1,
                                          int __scale) {
  __builtin_ia32_scattersiv8di(__addr, __mask, (__v8si)__index, (__v8di)__v1,
                               __scale);
}

__funline void _mm512_i64scatter_epi32(void *__addr, __m512i __index,
                                     __m256i __v1, int __scale) {
  __builtin_ia32_scatterdiv16si(__addr, (__mmask8)0xFF, (__v8di)__index,
                                (__v8si)__v1, __scale);
}

__funline void _mm512_mask_i64scatter_epi32(void *__addr, __mmask8 __mask,
                                          __m512i __index, __m256i __v1,
                                          int __scale) {
  __builtin_ia32_scatterdiv16si(__addr, __mask, (__v8di)__index, (__v8si)__v1,
                                __scale);
}

__funline void _mm512_i64scatter_epi64(void *__addr, __m512i __index,
                                     __m512i __v1, int __scale) {
  __builtin_ia32_scatterdiv8di(__addr, (__mmask8)0xFF, (__v8di)__index,
                               (__v8di)__v1, __scale);
}

__funline void _mm512_mask_i64scatter_epi64(void *__addr, __mmask8 __mask,
                                          __m512i __index, __m512i __v1,
                                          int __scale) {
  __builtin_ia32_scatterdiv8di(__addr, __mask, (__v8di)__index, (__v8di)__v1,
                               __scale);
}

#else
#define _mm512_i32gather_ps(INDEX, ADDR, SCALE) \
  (__m512) __builtin_ia32_gathersiv16sf( \
      (__v16sf)_mm512_undefined_ps(), (void const *)ADDR, \
      (__v16si)(__m512i)INDEX, (__mmask16)0xFFFF, (int)SCALE)
#define _mm512_mask_i32gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m512) __builtin_ia32_gathersiv16sf( \
      (__v16sf)(__m512)V1OLD, (void const *)ADDR, (__v16si)(__m512i)INDEX, \
      (__mmask16)MASK, (int)SCALE)
#define _mm512_i32gather_pd(INDEX, ADDR, SCALE) \
  (__m512d) __builtin_ia32_gathersiv8df( \
      (__v8df)_mm512_undefined_pd(), (void const *)ADDR, \
      (__v8si)(__m256i)INDEX, (__mmask8)0xFF, (int)SCALE)
#define _mm512_mask_i32gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m512d) __builtin_ia32_gathersiv8df( \
      (__v8df)(__m512d)V1OLD, (void const *)ADDR, (__v8si)(__m256i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm512_i64gather_ps(INDEX, ADDR, SCALE) \
  (__m256) __builtin_ia32_gatherdiv16sf( \
      (__v8sf)_mm256_undefined_ps(), (void const *)ADDR, \
      (__v8di)(__m512i)INDEX, (__mmask8)0xFF, (int)SCALE)
#define _mm512_mask_i64gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m256) __builtin_ia32_gatherdiv16sf( \
      (__v8sf)(__m256)V1OLD, (void const *)ADDR, (__v8di)(__m512i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm512_i64gather_pd(INDEX, ADDR, SCALE) \
  (__m512d) __builtin_ia32_gatherdiv8df( \
      (__v8df)_mm512_undefined_pd(), (void const *)ADDR, \
      (__v8di)(__m512i)INDEX, (__mmask8)0xFF, (int)SCALE)
#define _mm512_mask_i64gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m512d) __builtin_ia32_gatherdiv8df( \
      (__v8df)(__m512d)V1OLD, (void const *)ADDR, (__v8di)(__m512i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm512_i32gather_epi32(INDEX, ADDR, SCALE) \
  (__m512i) __builtin_ia32_gathersiv16si( \
      (__v16si)_mm512_undefined_epi32(), (void const *)ADDR, \
      (__v16si)(__m512i)INDEX, (__mmask16)0xFFFF, (int)SCALE)
#define _mm512_mask_i32gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m512i) __builtin_ia32_gathersiv16si( \
      (__v16si)(__m512i)V1OLD, (void const *)ADDR, (__v16si)(__m512i)INDEX, \
      (__mmask16)MASK, (int)SCALE)
#define _mm512_i32gather_epi64(INDEX, ADDR, SCALE) \
  (__m512i) __builtin_ia32_gathersiv8di( \
      (__v8di)_mm512_undefined_epi32(), (void const *)ADDR, \
      (__v8si)(__m256i)INDEX, (__mmask8)0xFF, (int)SCALE)
#define _mm512_mask_i32gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m512i) __builtin_ia32_gathersiv8di( \
      (__v8di)(__m512i)V1OLD, (void const *)ADDR, (__v8si)(__m256i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm512_i64gather_epi32(INDEX, ADDR, SCALE) \
  (__m256i) __builtin_ia32_gatherdiv16si( \
      (__v8si)_mm256_undefined_si256(), (void const *)ADDR, \
      (__v8di)(__m512i)INDEX, (__mmask8)0xFF, (int)SCALE)
#define _mm512_mask_i64gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m256i) __builtin_ia32_gatherdiv16si( \
      (__v8si)(__m256i)V1OLD, (void const *)ADDR, (__v8di)(__m512i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm512_i64gather_epi64(INDEX, ADDR, SCALE) \
  (__m512i) __builtin_ia32_gatherdiv8di( \
      (__v8di)_mm512_undefined_epi32(), (void const *)ADDR, \
      (__v8di)(__m512i)INDEX, (__mmask8)0xFF, (int)SCALE)
#define _mm512_mask_i64gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m512i) __builtin_ia32_gatherdiv8di( \
      (__v8di)(__m512i)V1OLD, (void const *)ADDR, (__v8di)(__m512i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm512_i32scatter_ps(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv16sf((void *)ADDR, (__mmask16)0xFFFF, \
                                (__v16si)(__m512i)INDEX, (__v16sf)(__m512)V1, \
                                (int)SCALE)
#define _mm512_mask_i32scatter_ps(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv16sf((void *)ADDR, (__mmask16)MASK, \
                                (__v16si)(__m512i)INDEX, (__v16sf)(__m512)V1, \
                                (int)SCALE)
#define _mm512_i32scatter_pd(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv8df((void *)ADDR, (__mmask8)0xFF, \
                               (__v8si)(__m256i)INDEX, (__v8df)(__m512d)V1, \
                               (int)SCALE)
#define _mm512_mask_i32scatter_pd(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv8df((void *)ADDR, (__mmask8)MASK, \
                               (__v8si)(__m256i)INDEX, (__v8df)(__m512d)V1, \
                               (int)SCALE)
#define _mm512_i64scatter_ps(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv16sf((void *)ADDR, (__mmask8)0xFF, \
                                (__v8di)(__m512i)INDEX, (__v8sf)(__m256)V1, \
                                (int)SCALE)
#define _mm512_mask_i64scatter_ps(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv16sf((void *)ADDR, (__mmask8)MASK, \
                                (__v8di)(__m512i)INDEX, (__v8sf)(__m256)V1, \
                                (int)SCALE)
#define _mm512_i64scatter_pd(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv8df((void *)ADDR, (__mmask8)0xFF, \
                               (__v8di)(__m512i)INDEX, (__v8df)(__m512d)V1, \
                               (int)SCALE)
#define _mm512_mask_i64scatter_pd(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv8df((void *)ADDR, (__mmask8)MASK, \
                               (__v8di)(__m512i)INDEX, (__v8df)(__m512d)V1, \
                               (int)SCALE)
#define _mm512_i32scatter_epi32(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv16si((void *)ADDR, (__mmask16)0xFFFF, \
                                (__v16si)(__m512i)INDEX, \
                                (__v16si)(__m512i)V1, (int)SCALE)
#define _mm512_mask_i32scatter_epi32(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv16si((void *)ADDR, (__mmask16)MASK, \
                                (__v16si)(__m512i)INDEX, \
                                (__v16si)(__m512i)V1, (int)SCALE)
#define _mm512_i32scatter_epi64(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv8di((void *)ADDR, (__mmask8)0xFF, \
                               (__v8si)(__m256i)INDEX, (__v8di)(__m512i)V1, \
                               (int)SCALE)
#define _mm512_mask_i32scatter_epi64(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv8di((void *)ADDR, (__mmask8)MASK, \
                               (__v8si)(__m256i)INDEX, (__v8di)(__m512i)V1, \
                               (int)SCALE)
#define _mm512_i64scatter_epi32(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv16si((void *)ADDR, (__mmask8)0xFF, \
                                (__v8di)(__m512i)INDEX, (__v8si)(__m256i)V1, \
                                (int)SCALE)
#define _mm512_mask_i64scatter_epi32(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv16si((void *)ADDR, (__mmask8)MASK, \
                                (__v8di)(__m512i)INDEX, (__v8si)(__m256i)V1, \
                                (int)SCALE)
#define _mm512_i64scatter_epi64(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv8di((void *)ADDR, (__mmask8)0xFF, \
                               (__v8di)(__m512i)INDEX, (__v8di)(__m512i)V1, \
                               (int)SCALE)
#define _mm512_mask_i64scatter_epi64(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv8di((void *)ADDR, (__mmask8)MASK, \
                               (__v8di)(__m512i)INDEX, (__v8di)(__m512i)V1, \
                               (int)SCALE)
#endif

__funline __m512d _mm512_mask_compress_pd(__m512d __W, __mmask8 __U,
                                        __m512d __A) {
  return (__m512d)__builtin_ia32_compressdf512_mask((__v8df)__A, (__v8df)__W,
                                                    (__mmask8)__U);
}

__funline __m512d _mm512_maskz_compress_pd(__mmask8 __U, __m512d __A) {
  return (__m512d)__builtin_ia32_compressdf512_mask(
      (__v8df)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U);
}

__funline void _mm512_mask_compressstoreu_pd(void *__P, __mmask8 __U,
                                           __m512d __A) {
  __builtin_ia32_compressstoredf512_mask((__v8df *)__P, (__v8df)__A,
                                         (__mmask8)__U);
}

__funline __m512 _mm512_mask_compress_ps(__m512 __W, __mmask16 __U, __m512 __A) {
  return (__m512)__builtin_ia32_compresssf512_mask((__v16sf)__A, (__v16sf)__W,
                                                   (__mmask16)__U);
}

__funline __m512 _mm512_maskz_compress_ps(__mmask16 __U, __m512 __A) {
  return (__m512)__builtin_ia32_compresssf512_mask(
      (__v16sf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U);
}

__funline void _mm512_mask_compressstoreu_ps(void *__P, __mmask16 __U,
                                           __m512 __A) {
  __builtin_ia32_compressstoresf512_mask((__v16sf *)__P, (__v16sf)__A,
                                         (__mmask16)__U);
}

__funline __m512i _mm512_mask_compress_epi64(__m512i __W, __mmask8 __U,
                                           __m512i __A) {
  return (__m512i)__builtin_ia32_compressdi512_mask((__v8di)__A, (__v8di)__W,
                                                    (__mmask8)__U);
}

__funline __m512i _mm512_maskz_compress_epi64(__mmask8 __U, __m512i __A) {
  return (__m512i)__builtin_ia32_compressdi512_mask(
      (__v8di)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}

__funline void _mm512_mask_compressstoreu_epi64(void *__P, __mmask8 __U,
                                              __m512i __A) {
  __builtin_ia32_compressstoredi512_mask((__v8di *)__P, (__v8di)__A,
                                         (__mmask8)__U);
}

__funline __m512i _mm512_mask_compress_epi32(__m512i __W, __mmask16 __U,
                                           __m512i __A) {
  return (__m512i)__builtin_ia32_compresssi512_mask((__v16si)__A, (__v16si)__W,
                                                    (__mmask16)__U);
}

__funline __m512i _mm512_maskz_compress_epi32(__mmask16 __U, __m512i __A) {
  return (__m512i)__builtin_ia32_compresssi512_mask(
      (__v16si)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U);
}

__funline void _mm512_mask_compressstoreu_epi32(void *__P, __mmask16 __U,
                                              __m512i __A) {
  __builtin_ia32_compressstoresi512_mask((__v16si *)__P, (__v16si)__A,
                                         (__mmask16)__U);
}

__funline __m512d _mm512_mask_expand_pd(__m512d __W, __mmask8 __U, __m512d __A) {
  return (__m512d)__builtin_ia32_expanddf512_mask((__v8df)__A, (__v8df)__W,
                                                  (__mmask8)__U);
}

__funline __m512d _mm512_maskz_expand_pd(__mmask8 __U, __m512d __A) {
  return (__m512d)__builtin_ia32_expanddf512_maskz(
      (__v8df)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U);
}

__funline __m512d _mm512_mask_expandloadu_pd(__m512d __W, __mmask8 __U,
                                           void const *__P) {
  return (__m512d)__builtin_ia32_expandloaddf512_mask(
      (const __v8df *)__P, (__v8df)__W, (__mmask8)__U);
}

__funline __m512d _mm512_maskz_expandloadu_pd(__mmask8 __U, void const *__P) {
  return (__m512d)__builtin_ia32_expandloaddf512_maskz(
      (const __v8df *)__P, (__v8df)_mm512_setzero_pd(), (__mmask8)__U);
}

__funline __m512 _mm512_mask_expand_ps(__m512 __W, __mmask16 __U, __m512 __A) {
  return (__m512)__builtin_ia32_expandsf512_mask((__v16sf)__A, (__v16sf)__W,
                                                 (__mmask16)__U);
}

__funline __m512 _mm512_maskz_expand_ps(__mmask16 __U, __m512 __A) {
  return (__m512)__builtin_ia32_expandsf512_maskz(
      (__v16sf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U);
}
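/* Usage sketch (illustrative; m, v, and the float* cursor out are
   hypothetical): compress left-packs the selected lanes, which makes a
   mask-driven filter loop straightforward:
     __m512 packed = _mm512_maskz_compress_ps(m, v);
   or, streaming the survivors to memory and advancing by popcount:
     _mm512_mask_compressstoreu_ps(out, m, v);
     out += __builtin_popcount((unsigned)m);  */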
__funline __m512 _mm512_mask_expandloadu_ps(__m512 __W, __mmask16 __U,
                                          void const *__P) {
  return (__m512)__builtin_ia32_expandloadsf512_mask(
      (const __v16sf *)__P, (__v16sf)__W, (__mmask16)__U);
}

__funline __m512 _mm512_maskz_expandloadu_ps(__mmask16 __U, void const *__P) {
  return (__m512)__builtin_ia32_expandloadsf512_maskz(
      (const __v16sf *)__P, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U);
}

__funline __m512i _mm512_mask_expand_epi64(__m512i __W, __mmask8 __U,
                                         __m512i __A) {
  return (__m512i)__builtin_ia32_expanddi512_mask((__v8di)__A, (__v8di)__W,
                                                  (__mmask8)__U);
}

__funline __m512i _mm512_maskz_expand_epi64(__mmask8 __U, __m512i __A) {
  return (__m512i)__builtin_ia32_expanddi512_maskz(
      (__v8di)__A, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}

__funline __m512i _mm512_mask_expandloadu_epi64(__m512i __W, __mmask8 __U,
                                              void const *__P) {
  return (__m512i)__builtin_ia32_expandloaddi512_mask(
      (const __v8di *)__P, (__v8di)__W, (__mmask8)__U);
}

__funline __m512i _mm512_maskz_expandloadu_epi64(__mmask8 __U,
                                               void const *__P) {
  return (__m512i)__builtin_ia32_expandloaddi512_maskz(
      (const __v8di *)__P, (__v8di)_mm512_setzero_si512(), (__mmask8)__U);
}

__funline __m512i _mm512_mask_expand_epi32(__m512i __W, __mmask16 __U,
                                         __m512i __A) {
  return (__m512i)__builtin_ia32_expandsi512_mask((__v16si)__A, (__v16si)__W,
                                                  (__mmask16)__U);
}

__funline __m512i _mm512_maskz_expand_epi32(__mmask16 __U, __m512i __A) {
  return (__m512i)__builtin_ia32_expandsi512_maskz(
      (__v16si)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U);
}

__funline __m512i _mm512_mask_expandloadu_epi32(__m512i __W, __mmask16 __U,
                                              void const *__P) {
  return (__m512i)__builtin_ia32_expandloadsi512_mask(
      (const __v16si *)__P, (__v16si)__W, (__mmask16)__U);
}

__funline __m512i _mm512_maskz_expandloadu_epi32(__mmask16 __U,
                                               void const *__P) {
  return (__m512i)__builtin_ia32_expandloadsi512_maskz(
      (const __v16si *)__P, (__v16si)_mm512_setzero_si512(), (__mmask16)__U);
}

/* Mask arithmetic operations */
#define _kand_mask16  _mm512_kand
#define _kandn_mask16 _mm512_kandn
#define _knot_mask16  _mm512_knot
#define _kor_mask16   _mm512_kor
#define _kxnor_mask16 _mm512_kxnor
#define _kxor_mask16  _mm512_kxor

__funline unsigned char _kortest_mask16_u8(__mmask16 __A, __mmask16 __B,
                                         unsigned char *__CF) {
  *__CF = (unsigned char)__builtin_ia32_kortestchi(__A, __B);
  return (unsigned char)__builtin_ia32_kortestzhi(__A, __B);
}

__funline unsigned char _kortestz_mask16_u8(__mmask16 __A, __mmask16 __B) {
  return (unsigned char)__builtin_ia32_kortestzhi((__mmask16)__A,
                                                  (__mmask16)__B);
}

__funline unsigned char _kortestc_mask16_u8(__mmask16 __A, __mmask16 __B) {
  return (unsigned char)__builtin_ia32_kortestchi((__mmask16)__A,
                                                  (__mmask16)__B);
}

__funline unsigned int _cvtmask16_u32(__mmask16 __A) {
  return (unsigned int)__builtin_ia32_kmovw((__mmask16)__A);
}

__funline __mmask16 _cvtu32_mask16(unsigned int __A) {
  return (__mmask16)__builtin_ia32_kmovw((__mmask16)__A);
}

__funline __mmask16 _load_mask16(__mmask16 *__A) {
  return (__mmask16)__builtin_ia32_kmovw(*(__mmask16 *)__A);
}

__funline void _store_mask16(__mmask16 *__A, __mmask16 __B) {
  *(__mmask16 *)__A = __builtin_ia32_kmovw(__B);
}

__funline __mmask16 _mm512_kand(__mmask16 __A, __mmask16 __B) {
  return (__mmask16)__builtin_ia32_kandhi((__mmask16)__A, (__mmask16)__B);
}

__funline __mmask16 _mm512_kandn(__mmask16 __A, __mmask16 __B) {
  return (__mmask16)__builtin_ia32_kandnhi((__mmask16)__A, (__mmask16)__B);
}

__funline __mmask16 _mm512_kor(__mmask16 __A, __mmask16 __B) {
  return (__mmask16)__builtin_ia32_korhi((__mmask16)__A, (__mmask16)__B);
}
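/* Note (illustrative, not from the upstream header): kortest reports
   on the OR of two masks; the usual all-zero test for a single mask m
   is the idiom
     if (_mm512_kortestz(m, m)) ...   (no lane was selected)
   and _mm512_kortestc checks whether the OR is all-ones.  */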
__funline int _mm512_kortestz(__mmask16 __A, __mmask16 __B) {
  return (__mmask16)__builtin_ia32_kortestzhi((__mmask16)__A, (__mmask16)__B);
}

__funline int _mm512_kortestc(__mmask16 __A, __mmask16 __B) {
  return (__mmask16)__builtin_ia32_kortestchi((__mmask16)__A, (__mmask16)__B);
}

__funline __mmask16 _mm512_kxnor(__mmask16 __A, __mmask16 __B) {
  return (__mmask16)__builtin_ia32_kxnorhi((__mmask16)__A, (__mmask16)__B);
}

__funline __mmask16 _mm512_kxor(__mmask16 __A, __mmask16 __B) {
  return (__mmask16)__builtin_ia32_kxorhi((__mmask16)__A, (__mmask16)__B);
}

__funline __mmask16 _mm512_knot(__mmask16 __A) {
  return (__mmask16)__builtin_ia32_knothi((__mmask16)__A);
}

__funline __mmask16 _mm512_kunpackb(__mmask16 __A, __mmask16 __B) {
  return (__mmask16)__builtin_ia32_kunpckhi((__mmask16)__A, (__mmask16)__B);
}

__funline __mmask16 _kunpackb_mask16(__mmask8 __A, __mmask8 __B) {
  return (__mmask16)__builtin_ia32_kunpckhi((__mmask16)__A, (__mmask16)__B);
}

#ifdef __OPTIMIZE__
__funline __m512i _mm512_maskz_inserti32x4(__mmask16 __B, __m512i __C,
                                         __m128i __D, const int __imm) {
  return (__m512i)__builtin_ia32_inserti32x4_mask(
      (__v16si)__C, (__v4si)__D, __imm, (__v16si)_mm512_setzero_si512(), __B);
}

__funline __m512 _mm512_maskz_insertf32x4(__mmask16 __B, __m512 __C, __m128 __D,
                                        const int __imm) {
  return (__m512)__builtin_ia32_insertf32x4_mask(
      (__v16sf)__C, (__v4sf)__D, __imm, (__v16sf)_mm512_setzero_ps(), __B);
}

__funline __m512i _mm512_mask_inserti32x4(__m512i __A, __mmask16 __B,
                                        __m512i __C, __m128i __D,
                                        const int __imm) {
  return (__m512i)__builtin_ia32_inserti32x4_mask((__v16si)__C, (__v4si)__D,
                                                  __imm, (__v16si)__A, __B);
}

__funline __m512 _mm512_mask_insertf32x4(__m512 __A, __mmask16 __B, __m512 __C,
                                       __m128 __D, const int __imm) {
  return (__m512)__builtin_ia32_insertf32x4_mask((__v16sf)__C, (__v4sf)__D,
                                                 __imm, (__v16sf)__A, __B);
}
#else
#define _mm512_maskz_insertf32x4(A, X, Y, C) \
  ((__m512)__builtin_ia32_insertf32x4_mask( \
      (__v16sf)(__m512)(X), (__v4sf)(__m128)(Y), (int)(C), \
      (__v16sf)_mm512_setzero_ps(), (__mmask16)(A)))
#define _mm512_maskz_inserti32x4(A, X, Y, C) \
  ((__m512i)__builtin_ia32_inserti32x4_mask( \
      (__v16si)(__m512i)(X), (__v4si)(__m128i)(Y), (int)(C), \
      (__v16si)_mm512_setzero_si512(), (__mmask16)(A)))
#define _mm512_mask_insertf32x4(A, B, X, Y, C) \
  ((__m512)__builtin_ia32_insertf32x4_mask( \
      (__v16sf)(__m512)(X), (__v4sf)(__m128)(Y), (int)(C), \
      (__v16sf)(__m512)(A), (__mmask16)(B)))
#define _mm512_mask_inserti32x4(A, B, X, Y, C) \
  ((__m512i)__builtin_ia32_inserti32x4_mask( \
      (__v16si)(__m512i)(X), (__v4si)(__m128i)(Y), (int)(C), \
      (__v16si)(__m512i)(A), (__mmask16)(B)))
#endif

__funline __m512i _mm512_max_epi64(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_pmaxsq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1);
}

__funline __m512i _mm512_maskz_max_epi64(__mmask8 __M, __m512i __A,
                                       __m512i __B) {
  return (__m512i)__builtin_ia32_pmaxsq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_setzero_si512(), __M);
}

__funline __m512i _mm512_mask_max_epi64(__m512i __W, __mmask8 __M, __m512i __A,
                                      __m512i __B) {
  return (__m512i)__builtin_ia32_pmaxsq512_mask((__v8di)__A, (__v8di)__B,
                                                (__v8di)__W, __M);
}

__funline __m512i _mm512_min_epi64(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_pminsq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1);
}

__funline __m512i _mm512_mask_min_epi64(__m512i __W, __mmask8 __M, __m512i __A,
                                      __m512i __B) {
  return (__m512i)__builtin_ia32_pminsq512_mask((__v8di)__A, (__v8di)__B,
                                                (__v8di)__W, __M);
}
__funline __m512i _mm512_maskz_min_epi64(__mmask8 __M, __m512i __A,
                                       __m512i __B) {
  return (__m512i)__builtin_ia32_pminsq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_setzero_si512(), __M);
}

__funline __m512i _mm512_max_epu64(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_pmaxuq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1);
}

__funline __m512i _mm512_maskz_max_epu64(__mmask8 __M, __m512i __A,
                                       __m512i __B) {
  return (__m512i)__builtin_ia32_pmaxuq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_setzero_si512(), __M);
}

__funline __m512i _mm512_mask_max_epu64(__m512i __W, __mmask8 __M, __m512i __A,
                                      __m512i __B) {
  return (__m512i)__builtin_ia32_pmaxuq512_mask((__v8di)__A, (__v8di)__B,
                                                (__v8di)__W, __M);
}

__funline __m512i _mm512_min_epu64(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_pminuq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1);
}

__funline __m512i _mm512_mask_min_epu64(__m512i __W, __mmask8 __M, __m512i __A,
                                      __m512i __B) {
  return (__m512i)__builtin_ia32_pminuq512_mask((__v8di)__A, (__v8di)__B,
                                                (__v8di)__W, __M);
}

__funline __m512i _mm512_maskz_min_epu64(__mmask8 __M, __m512i __A,
                                       __m512i __B) {
  return (__m512i)__builtin_ia32_pminuq512_mask(
      (__v8di)__A, (__v8di)__B, (__v8di)_mm512_setzero_si512(), __M);
}

__funline __m512i _mm512_max_epi32(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_pmaxsd512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)_mm512_undefined_epi32(),
      (__mmask16)-1);
}

__funline __m512i _mm512_maskz_max_epi32(__mmask16 __M, __m512i __A,
                                       __m512i __B) {
  return (__m512i)__builtin_ia32_pmaxsd512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)_mm512_setzero_si512(), __M);
}

__funline __m512i _mm512_mask_max_epi32(__m512i __W, __mmask16 __M, __m512i __A,
                                      __m512i __B) {
  return (__m512i)__builtin_ia32_pmaxsd512_mask((__v16si)__A, (__v16si)__B,
                                                (__v16si)__W, __M);
}

__funline __m512i _mm512_min_epi32(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_pminsd512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)_mm512_undefined_epi32(),
      (__mmask16)-1);
}

__funline __m512i _mm512_maskz_min_epi32(__mmask16 __M, __m512i __A,
                                       __m512i __B) {
  return (__m512i)__builtin_ia32_pminsd512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)_mm512_setzero_si512(), __M);
}

__funline __m512i _mm512_mask_min_epi32(__m512i __W, __mmask16 __M, __m512i __A,
                                      __m512i __B) {
  return (__m512i)__builtin_ia32_pminsd512_mask((__v16si)__A, (__v16si)__B,
                                                (__v16si)__W, __M);
}

__funline __m512i _mm512_max_epu32(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_pmaxud512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)_mm512_undefined_epi32(),
      (__mmask16)-1);
}

__funline __m512i _mm512_maskz_max_epu32(__mmask16 __M, __m512i __A,
                                       __m512i __B) {
  return (__m512i)__builtin_ia32_pmaxud512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)_mm512_setzero_si512(), __M);
}

__funline __m512i _mm512_mask_max_epu32(__m512i __W, __mmask16 __M, __m512i __A,
                                      __m512i __B) {
  return (__m512i)__builtin_ia32_pmaxud512_mask((__v16si)__A, (__v16si)__B,
                                                (__v16si)__W, __M);
}

__funline __m512i _mm512_min_epu32(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_pminud512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)_mm512_undefined_epi32(),
      (__mmask16)-1);
}

__funline __m512i _mm512_maskz_min_epu32(__mmask16 __M, __m512i __A,
                                       __m512i __B) {
  return (__m512i)__builtin_ia32_pminud512_mask(
      (__v16si)__A, (__v16si)__B, (__v16si)_mm512_setzero_si512(), __M);
}
{ return (__m512i)__builtin_ia32_pminud512_mask((__v16si)__A, (__v16si)__B, (__v16si)__W, __M); } __funline __m512 _mm512_unpacklo_ps(__m512 __A, __m512 __B) { return (__m512)__builtin_ia32_unpcklps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1); } __funline __m512 _mm512_mask_unpacklo_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_unpcklps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U); } __funline __m512 _mm512_maskz_unpacklo_ps(__mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_unpcklps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U); } #ifdef __OPTIMIZE__ __funline __m128d _mm_max_round_sd(__m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_maxsd_round((__v2df)__A, (__v2df)__B, __R); } __funline __m128d _mm_mask_max_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_maxsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U, __R); } __funline __m128d _mm_maskz_max_round_sd(__mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_maxsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, __R); } __funline __m128 _mm_max_round_ss(__m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_maxss_round((__v4sf)__A, (__v4sf)__B, __R); } __funline __m128 _mm_mask_max_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_maxss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U, __R); } __funline __m128 _mm_maskz_max_round_ss(__mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_maxss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, __R); } __funline __m128d _mm_min_round_sd(__m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_minsd_round((__v2df)__A, (__v2df)__B, __R); } __funline __m128d _mm_mask_min_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_minsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U, __R); } __funline __m128d _mm_maskz_min_round_sd(__mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_minsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, __R); } __funline __m128 _mm_min_round_ss(__m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_minss_round((__v4sf)__A, (__v4sf)__B, __R); } __funline __m128 _mm_mask_min_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_minss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U, __R); } __funline __m128 _mm_maskz_min_round_ss(__mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_minss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, __R); } #else #define _mm_max_round_sd(A, B, C) (__m128d) __builtin_ia32_maxsd_round(A, B, C) #define _mm_mask_max_round_sd(W, U, A, B, C) \ (__m128d) __builtin_ia32_maxsd_mask_round(A, B, W, U, C) #define _mm_maskz_max_round_sd(U, A, B, C) \ (__m128d) \ __builtin_ia32_maxsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C) #define _mm_max_round_ss(A, B, C) (__m128) __builtin_ia32_maxss_round(A, B, C) #define _mm_mask_max_round_ss(W, U, A, B, C) \ (__m128) 
__builtin_ia32_maxss_mask_round(A, B, W, U, C) #define _mm_maskz_max_round_ss(U, A, B, C) \ (__m128) __builtin_ia32_maxss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C) #define _mm_min_round_sd(A, B, C) (__m128d) __builtin_ia32_minsd_round(A, B, C) #define _mm_mask_min_round_sd(W, U, A, B, C) \ (__m128d) __builtin_ia32_minsd_mask_round(A, B, W, U, C) #define _mm_maskz_min_round_sd(U, A, B, C) \ (__m128d) \ __builtin_ia32_minsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C) #define _mm_min_round_ss(A, B, C) (__m128) __builtin_ia32_minss_round(A, B, C) #define _mm_mask_min_round_ss(W, U, A, B, C) \ (__m128) __builtin_ia32_minss_mask_round(A, B, W, U, C) #define _mm_maskz_min_round_ss(U, A, B, C) \ (__m128) __builtin_ia32_minss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C) #endif __funline __m512d _mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W) { return (__m512d)__builtin_ia32_blendmpd_512_mask((__v8df)__A, (__v8df)__W, (__mmask8)__U); } __funline __m512 _mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W) { return (__m512)__builtin_ia32_blendmps_512_mask((__v16sf)__A, (__v16sf)__W, (__mmask16)__U); } __funline __m512i _mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W) { return (__m512i)__builtin_ia32_blendmq_512_mask((__v8di)__A, (__v8di)__W, (__mmask8)__U); } __funline __m512i _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W) { return (__m512i)__builtin_ia32_blendmd_512_mask((__v16si)__A, (__v16si)__W, (__mmask16)__U); } #ifdef __OPTIMIZE__ __funline __m128d _mm_fmadd_round_sd(__m128d __W, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_vfmaddsd3_round((__v2df)__W, (__v2df)__A, (__v2df)__B, __R); } __funline __m128 _mm_fmadd_round_ss(__m128 __W, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_vfmaddss3_round((__v4sf)__W, (__v4sf)__A, (__v4sf)__B, __R); } __funline __m128d _mm_fmsub_round_sd(__m128d __W, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_vfmaddsd3_round((__v2df)__W, (__v2df)__A, -(__v2df)__B, __R); } __funline __m128 _mm_fmsub_round_ss(__m128 __W, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_vfmaddss3_round((__v4sf)__W, (__v4sf)__A, -(__v4sf)__B, __R); } __funline __m128d _mm_fnmadd_round_sd(__m128d __W, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_vfmaddsd3_round((__v2df)__W, -(__v2df)__A, (__v2df)__B, __R); } __funline __m128 _mm_fnmadd_round_ss(__m128 __W, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_vfmaddss3_round((__v4sf)__W, -(__v4sf)__A, (__v4sf)__B, __R); } __funline __m128d _mm_fnmsub_round_sd(__m128d __W, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_vfmaddsd3_round((__v2df)__W, -(__v2df)__A, -(__v2df)__B, __R); } __funline __m128 _mm_fnmsub_round_ss(__m128 __W, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_vfmaddss3_round((__v4sf)__W, -(__v4sf)__A, -(__v4sf)__B, __R); } #else #define _mm_fmadd_round_sd(A, B, C, R) \ (__m128d) __builtin_ia32_vfmaddsd3_round(A, B, C, R) #define _mm_fmadd_round_ss(A, B, C, R) \ (__m128) __builtin_ia32_vfmaddss3_round(A, B, C, R) #define _mm_fmsub_round_sd(A, B, C, R) \ (__m128d) __builtin_ia32_vfmaddsd3_round(A, B, -(C), R) #define _mm_fmsub_round_ss(A, B, C, R) \ (__m128) __builtin_ia32_vfmaddss3_round(A, B, -(C), R) #define _mm_fnmadd_round_sd(A, B, C, R) \ (__m128d) __builtin_ia32_vfmaddsd3_round(A, -(B), C, R) #define _mm_fnmadd_round_ss(A, B, C, R) \ (__m128) 
__builtin_ia32_vfmaddss3_round(A, -(B), C, R) #define _mm_fnmsub_round_sd(A, B, C, R) \ (__m128d) __builtin_ia32_vfmaddsd3_round(A, -(B), -(C), R) #define _mm_fnmsub_round_ss(A, B, C, R) \ (__m128) __builtin_ia32_vfmaddss3_round(A, -(B), -(C), R) #endif __funline __m128d _mm_mask_fmadd_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)__W, (__v2df)__A, (__v2df)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask_fmadd_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)__W, (__v4sf)__A, (__v4sf)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask3_fmadd_sd(__m128d __W, __m128d __A, __m128d __B, __mmask8 __U) { return (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)__W, (__v2df)__A, (__v2df)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask3_fmadd_ss(__m128 __W, __m128 __A, __m128 __B, __mmask8 __U) { return (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)__W, (__v4sf)__A, (__v4sf)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_maskz_fmadd_sd(__mmask8 __U, __m128d __W, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)__W, (__v2df)__A, (__v2df)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_maskz_fmadd_ss(__mmask8 __U, __m128 __W, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)__W, (__v4sf)__A, (__v4sf)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask_fmsub_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)__W, (__v2df)__A, -(__v2df)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask_fmsub_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)__W, (__v4sf)__A, -(__v4sf)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask3_fmsub_sd(__m128d __W, __m128d __A, __m128d __B, __mmask8 __U) { return (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)__W, (__v2df)__A, (__v2df)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask3_fmsub_ss(__m128 __W, __m128 __A, __m128 __B, __mmask8 __U) { return (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)__W, (__v4sf)__A, (__v4sf)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_maskz_fmsub_sd(__mmask8 __U, __m128d __W, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)__W, (__v2df)__A, -(__v2df)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_maskz_fmsub_ss(__mmask8 __U, __m128 __W, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)__W, (__v4sf)__A, -(__v4sf)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask_fnmadd_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)__W, -(__v2df)__A, (__v2df)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask_fnmadd_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)__W, -(__v4sf)__A, (__v4sf)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask3_fnmadd_sd(__m128d __W, __m128d __A, __m128d __B, __mmask8 __U) { return (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)__W, -(__v2df)__A, (__v2df)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } 
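/* Illustrative note: in the scalar mask forms above only bit 0 of the
   mask is consulted.  A hypothetical guarded fused multiply-add on the
   low element:

     __m128d r = _mm_mask_fmadd_sd(w, flag & 1, a, b);

   computes w[0]*a[0]+b[0] when the bit is set and otherwise keeps
   w[0]; the maskz forms zero the low element instead.  Element 1 is
   always taken from the first vector operand. */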
__funline __m128 _mm_mask3_fnmadd_ss(__m128 __W, __m128 __A, __m128 __B, __mmask8 __U) { return (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)__W, -(__v4sf)__A, (__v4sf)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_maskz_fnmadd_sd(__mmask8 __U, __m128d __W, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)__W, -(__v2df)__A, (__v2df)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_maskz_fnmadd_ss(__mmask8 __U, __m128 __W, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)__W, -(__v4sf)__A, (__v4sf)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask_fnmsub_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)__W, -(__v2df)__A, -(__v2df)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask_fnmsub_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)__W, -(__v4sf)__A, -(__v4sf)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask3_fnmsub_sd(__m128d __W, __m128d __A, __m128d __B, __mmask8 __U) { return (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)__W, -(__v2df)__A, (__v2df)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask3_fnmsub_ss(__m128 __W, __m128 __A, __m128 __B, __mmask8 __U) { return (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)__W, -(__v4sf)__A, (__v4sf)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_maskz_fnmsub_sd(__mmask8 __U, __m128d __W, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)__W, -(__v2df)__A, -(__v2df)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_maskz_fnmsub_ss(__mmask8 __U, __m128 __W, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)__W, -(__v4sf)__A, -(__v4sf)__B, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } #ifdef __OPTIMIZE__ __funline __m128d _mm_mask_fmadd_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_vfmaddsd3_mask( (__v2df)__W, (__v2df)__A, (__v2df)__B, (__mmask8)__U, __R); } __funline __m128 _mm_mask_fmadd_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)__W, (__v4sf)__A, (__v4sf)__B, (__mmask8)__U, __R); } __funline __m128d _mm_mask3_fmadd_round_sd(__m128d __W, __m128d __A, __m128d __B, __mmask8 __U, const int __R) { return (__m128d)__builtin_ia32_vfmaddsd3_mask3( (__v2df)__W, (__v2df)__A, (__v2df)__B, (__mmask8)__U, __R); } __funline __m128 _mm_mask3_fmadd_round_ss(__m128 __W, __m128 __A, __m128 __B, __mmask8 __U, const int __R) { return (__m128)__builtin_ia32_vfmaddss3_mask3( (__v4sf)__W, (__v4sf)__A, (__v4sf)__B, (__mmask8)__U, __R); } __funline __m128d _mm_maskz_fmadd_round_sd(__mmask8 __U, __m128d __W, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_vfmaddsd3_maskz( (__v2df)__W, (__v2df)__A, (__v2df)__B, (__mmask8)__U, __R); } __funline __m128 _mm_maskz_fmadd_round_ss(__mmask8 __U, __m128 __W, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_vfmaddss3_maskz( (__v4sf)__W, (__v4sf)__A, (__v4sf)__B, (__mmask8)__U, __R); } __funline __m128d _mm_mask_fmsub_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_vfmaddsd3_mask( (__v2df)__W, (__v2df)__A, -(__v2df)__B, (__mmask8)__U, __R); } 
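/* Illustrative note: the __R rounding argument must be an integer
   constant expression.  A hypothetical call rounding to nearest with
   exceptions suppressed:

     __m128d r = _mm_mask_fmadd_round_sd(
         w, 1, a, b, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

   When __OPTIMIZE__ is not defined, the macro forms in the #else
   branch below keep the argument textual so the constant still
   reaches the builtin. */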
__funline __m128 _mm_mask_fmsub_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_vfmaddss3_mask( (__v4sf)__W, (__v4sf)__A, -(__v4sf)__B, (__mmask8)__U, __R); } __funline __m128d _mm_mask3_fmsub_round_sd(__m128d __W, __m128d __A, __m128d __B, __mmask8 __U, const int __R) { return (__m128d)__builtin_ia32_vfmsubsd3_mask3( (__v2df)__W, (__v2df)__A, (__v2df)__B, (__mmask8)__U, __R); } __funline __m128 _mm_mask3_fmsub_round_ss(__m128 __W, __m128 __A, __m128 __B, __mmask8 __U, const int __R) { return (__m128)__builtin_ia32_vfmsubss3_mask3( (__v4sf)__W, (__v4sf)__A, (__v4sf)__B, (__mmask8)__U, __R); } __funline __m128d _mm_maskz_fmsub_round_sd(__mmask8 __U, __m128d __W, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_vfmaddsd3_maskz( (__v2df)__W, (__v2df)__A, -(__v2df)__B, (__mmask8)__U, __R); } __funline __m128 _mm_maskz_fmsub_round_ss(__mmask8 __U, __m128 __W, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_vfmaddss3_maskz( (__v4sf)__W, (__v4sf)__A, -(__v4sf)__B, (__mmask8)__U, __R); } __funline __m128d _mm_mask_fnmadd_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_vfmaddsd3_mask( (__v2df)__W, -(__v2df)__A, (__v2df)__B, (__mmask8)__U, __R); } __funline __m128 _mm_mask_fnmadd_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)__W, -(__v4sf)__A, (__v4sf)__B, (__mmask8)__U, __R); } __funline __m128d _mm_mask3_fnmadd_round_sd(__m128d __W, __m128d __A, __m128d __B, __mmask8 __U, const int __R) { return (__m128d)__builtin_ia32_vfmaddsd3_mask3( (__v2df)__W, -(__v2df)__A, (__v2df)__B, (__mmask8)__U, __R); } __funline __m128 _mm_mask3_fnmadd_round_ss(__m128 __W, __m128 __A, __m128 __B, __mmask8 __U, const int __R) { return (__m128)__builtin_ia32_vfmaddss3_mask3( (__v4sf)__W, -(__v4sf)__A, (__v4sf)__B, (__mmask8)__U, __R); } __funline __m128d _mm_maskz_fnmadd_round_sd(__mmask8 __U, __m128d __W, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_vfmaddsd3_maskz( (__v2df)__W, -(__v2df)__A, (__v2df)__B, (__mmask8)__U, __R); } __funline __m128 _mm_maskz_fnmadd_round_ss(__mmask8 __U, __m128 __W, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_vfmaddss3_maskz( (__v4sf)__W, -(__v4sf)__A, (__v4sf)__B, (__mmask8)__U, __R); } __funline __m128d _mm_mask_fnmsub_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_vfmaddsd3_mask( (__v2df)__W, -(__v2df)__A, -(__v2df)__B, (__mmask8)__U, __R); } __funline __m128 _mm_mask_fnmsub_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_vfmaddss3_mask( (__v4sf)__W, -(__v4sf)__A, -(__v4sf)__B, (__mmask8)__U, __R); } __funline __m128d _mm_mask3_fnmsub_round_sd(__m128d __W, __m128d __A, __m128d __B, __mmask8 __U, const int __R) { return (__m128d)__builtin_ia32_vfmsubsd3_mask3( (__v2df)__W, -(__v2df)__A, (__v2df)__B, (__mmask8)__U, __R); } __funline __m128 _mm_mask3_fnmsub_round_ss(__m128 __W, __m128 __A, __m128 __B, __mmask8 __U, const int __R) { return (__m128)__builtin_ia32_vfmsubss3_mask3( (__v4sf)__W, -(__v4sf)__A, (__v4sf)__B, (__mmask8)__U, __R); } __funline __m128d _mm_maskz_fnmsub_round_sd(__mmask8 __U, __m128d __W, __m128d __A, __m128d __B, const int __R) { return (__m128d)__builtin_ia32_vfmaddsd3_maskz( (__v2df)__W, -(__v2df)__A, -(__v2df)__B, (__mmask8)__U, __R); } __funline 
__m128 _mm_maskz_fnmsub_round_ss(__mmask8 __U, __m128 __W, __m128 __A, __m128 __B, const int __R) { return (__m128)__builtin_ia32_vfmaddss3_maskz( (__v4sf)__W, -(__v4sf)__A, -(__v4sf)__B, (__mmask8)__U, __R); } #else #define _mm_mask_fmadd_round_sd(A, U, B, C, R) \ (__m128d) __builtin_ia32_vfmaddsd3_mask(A, B, C, U, R) #define _mm_mask_fmadd_round_ss(A, U, B, C, R) \ (__m128) __builtin_ia32_vfmaddss3_mask(A, B, C, U, R) #define _mm_mask3_fmadd_round_sd(A, B, C, U, R) \ (__m128d) __builtin_ia32_vfmaddsd3_mask3(A, B, C, U, R) #define _mm_mask3_fmadd_round_ss(A, B, C, U, R) \ (__m128) __builtin_ia32_vfmaddss3_mask3(A, B, C, U, R) #define _mm_maskz_fmadd_round_sd(U, A, B, C, R) \ (__m128d) __builtin_ia32_vfmaddsd3_maskz(A, B, C, U, R) #define _mm_maskz_fmadd_round_ss(U, A, B, C, R) \ (__m128) __builtin_ia32_vfmaddss3_maskz(A, B, C, U, R) #define _mm_mask_fmsub_round_sd(A, U, B, C, R) \ (__m128d) __builtin_ia32_vfmaddsd3_mask(A, B, -(C), U, R) #define _mm_mask_fmsub_round_ss(A, U, B, C, R) \ (__m128) __builtin_ia32_vfmaddss3_mask(A, B, -(C), U, R) #define _mm_mask3_fmsub_round_sd(A, B, C, U, R) \ (__m128d) __builtin_ia32_vfmsubsd3_mask3(A, B, C, U, R) #define _mm_mask3_fmsub_round_ss(A, B, C, U, R) \ (__m128) __builtin_ia32_vfmsubss3_mask3(A, B, C, U, R) #define _mm_maskz_fmsub_round_sd(U, A, B, C, R) \ (__m128d) __builtin_ia32_vfmaddsd3_maskz(A, B, -(C), U, R) #define _mm_maskz_fmsub_round_ss(U, A, B, C, R) \ (__m128) __builtin_ia32_vfmaddss3_maskz(A, B, -(C), U, R) #define _mm_mask_fnmadd_round_sd(A, U, B, C, R) \ (__m128d) __builtin_ia32_vfmaddsd3_mask(A, -(B), C, U, R) #define _mm_mask_fnmadd_round_ss(A, U, B, C, R) \ (__m128) __builtin_ia32_vfmaddss3_mask(A, -(B), C, U, R) #define _mm_mask3_fnmadd_round_sd(A, B, C, U, R) \ (__m128d) __builtin_ia32_vfmaddsd3_mask3(A, -(B), C, U, R) #define _mm_mask3_fnmadd_round_ss(A, B, C, U, R) \ (__m128) __builtin_ia32_vfmaddss3_mask3(A, -(B), C, U, R) #define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) \ (__m128d) __builtin_ia32_vfmaddsd3_maskz(A, -(B), C, U, R) #define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) \ (__m128) __builtin_ia32_vfmaddss3_maskz(A, -(B), C, U, R) #define _mm_mask_fnmsub_round_sd(A, U, B, C, R) \ (__m128d) __builtin_ia32_vfmaddsd3_mask(A, -(B), -(C), U, R) #define _mm_mask_fnmsub_round_ss(A, U, B, C, R) \ (__m128) __builtin_ia32_vfmaddss3_mask(A, -(B), -(C), U, R) #define _mm_mask3_fnmsub_round_sd(A, B, C, U, R) \ (__m128d) __builtin_ia32_vfmsubsd3_mask3(A, -(B), C, U, R) #define _mm_mask3_fnmsub_round_ss(A, B, C, U, R) \ (__m128) __builtin_ia32_vfmsubss3_mask3(A, -(B), C, U, R) #define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) \ (__m128d) __builtin_ia32_vfmaddsd3_maskz(A, -(B), -(C), U, R) #define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) \ (__m128) __builtin_ia32_vfmaddss3_maskz(A, -(B), -(C), U, R) #endif #ifdef __OPTIMIZE__ __funline int _mm_comi_round_ss(__m128 __A, __m128 __B, const int __P, const int __R) { return __builtin_ia32_vcomiss((__v4sf)__A, (__v4sf)__B, __P, __R); } __funline int _mm_comi_round_sd(__m128d __A, __m128d __B, const int __P, const int __R) { return __builtin_ia32_vcomisd((__v2df)__A, (__v2df)__B, __P, __R); } #else #define _mm_comi_round_ss(A, B, C, D) __builtin_ia32_vcomiss(A, B, C, D) #define _mm_comi_round_sd(A, B, C, D) __builtin_ia32_vcomisd(A, B, C, D) #endif __funline __m512d _mm512_sqrt_pd(__m512d __A) { return (__m512d)__builtin_ia32_sqrtpd512_mask( (__v8df)__A, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_sqrt_pd(__m512d __W, __mmask8 __U, 
__m512d __A) { return (__m512d)__builtin_ia32_sqrtpd512_mask( (__v8df)__A, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_sqrt_pd(__mmask8 __U, __m512d __A) { return (__m512d)__builtin_ia32_sqrtpd512_mask( (__v8df)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_sqrt_ps(__m512 __A) { return (__m512)__builtin_ia32_sqrtps512_mask( (__v16sf)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A) { return (__m512)__builtin_ia32_sqrtps512_mask( (__v16sf)__A, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_sqrt_ps(__mmask16 __U, __m512 __A) { return (__m512)__builtin_ia32_sqrtps512_mask( (__v16sf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_add_pd(__m512d __A, __m512d __B) { return (__m512d)((__v8df)__A + (__v8df)__B); } __funline __m512d _mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_addpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_addpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_add_ps(__m512 __A, __m512 __B) { return (__m512)((__v16sf)__A + (__v16sf)__B); } __funline __m512 _mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_addps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_addps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask_add_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_addsd_mask_round((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_maskz_add_sd(__mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_addsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask_add_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_addss_mask_round((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_maskz_add_ss(__mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_addss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_sub_pd(__m512d __A, __m512d __B) { return (__m512d)((__v8df)__A - (__v8df)__B); } __funline __m512d _mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_subpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_subpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_sub_ps(__m512 __A, __m512 __B) { return (__m512)((__v16sf)__A - (__v16sf)__B); } 
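/* Illustrative note: the unmasked add/sub/mul/div forms lower to
   plain GNU C vector arithmetic, while the mask and maskz forms go
   through the *_mask builtins so merging happens in the instruction
   itself.  A hypothetical masked update of the low eight lanes:

     __m512 r = _mm512_mask_sub_ps(src, (__mmask16)0x00ff, a, b);

   copies lanes 8..15 of src through unchanged; _mm512_maskz_sub_ps
   would zero them instead. */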
__funline __m512 _mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_subps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_subps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask_sub_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_subsd_mask_round((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_maskz_sub_sd(__mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_subsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask_sub_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_subss_mask_round((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_maskz_sub_ss(__mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_subss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mul_pd(__m512d __A, __m512d __B) { return (__m512d)((__v8df)__A * (__v8df)__B); } __funline __m512d _mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_mulpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_mulpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mul_ps(__m512 __A, __m512 __B) { return (__m512)((__v16sf)__A * (__v16sf)__B); } __funline __m512 _mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_mulps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_mulps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask_mul_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_mulsd_mask_round((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_maskz_mul_sd(__mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_mulsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask_mul_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_mulss_mask_round((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_maskz_mul_ss(__mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_mulss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_div_pd(__m512d __M, __m512d __V) { return (__m512d)((__v8df)__M / (__v8df)__V); } __funline __m512d _mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __M, __m512d __V) { return 
(__m512d)__builtin_ia32_divpd512_mask((__v8df)__M, (__v8df)__V, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_div_pd(__mmask8 __U, __m512d __M, __m512d __V) { return (__m512d)__builtin_ia32_divpd512_mask( (__v8df)__M, (__v8df)__V, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_div_ps(__m512 __A, __m512 __B) { return (__m512)((__v16sf)__A / (__v16sf)__B); } __funline __m512 _mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_divps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_divps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask_div_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_divsd_mask_round((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_maskz_div_sd(__mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_divsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask_div_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_divss_mask_round((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_maskz_div_ss(__mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_divss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_max_pd(__m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_maxpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_max_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_maxpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_max_pd(__mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_maxpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_max_ps(__m512 __A, __m512 __B) { return (__m512)__builtin_ia32_maxps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_max_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_maxps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_max_ps(__mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_maxps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask_max_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_maxsd_mask_round((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_maskz_max_sd(__mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_maxsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 
_mm_mask_max_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_maxss_mask_round((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_maskz_max_ss(__mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_maxss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_min_pd(__m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_minpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_min_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_minpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_min_pd(__mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_minpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_min_ps(__m512 __A, __m512 __B) { return (__m512)__builtin_ia32_minps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_min_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_minps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_min_ps(__mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_minps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask_min_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_minsd_mask_round((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_maskz_min_sd(__mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_minsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask_min_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_minss_mask_round((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_maskz_min_ss(__mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_minss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_scalef_pd(__m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_scalefpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_scalef_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_scalef_pd(__mmask8 __U, __m512d __A, __m512d __B) { return (__m512d)__builtin_ia32_scalefpd512_mask( (__v8df)__A, (__v8df)__B, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_scalef_ps(__m512 __A, __m512 __B) { return (__m512)__builtin_ia32_scalefps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_scalef_ps(__m512 __W, __mmask16 __U, __m512 __A, 
__m512 __B) { return (__m512)__builtin_ia32_scalefps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_scalef_ps(__mmask16 __U, __m512 __A, __m512 __B) { return (__m512)__builtin_ia32_scalefps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_scalef_sd(__m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_scalefsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_scalef_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_scalefss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) { return (__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) { return (__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfmsubpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfmsubpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) { return (__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfmsubpd512_maskz((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C) { return 
(__m512)__builtin_ia32_vfmsubps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfmsubps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) { return (__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfmsubps512_maskz((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_fmaddsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask3_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) { return (__m512d)__builtin_ia32_vfmaddsubpd512_mask3( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_fmaddsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfmaddsubpd512_maskz( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_fmaddsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfmaddsubps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask3_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) { return (__m512)__builtin_ia32_vfmaddsubps512_mask3( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_fmaddsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfmaddsubps512_maskz( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)__A, (__v8df)__B, -(__v8df)__C, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_fmsubadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfmaddsubpd512_mask( (__v8df)__A, (__v8df)__B, -(__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) { return (__m512d)__builtin_ia32_vfmsubaddpd512_mask3( (__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfmaddsubpd512_maskz( (__v8df)__A, (__v8df)__B, -(__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline 
__m512 _mm512_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfmaddsubps512_mask( (__v16sf)__A, (__v16sf)__B, -(__v16sf)__C, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_fmsubadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfmaddsubps512_mask( (__v16sf)__A, (__v16sf)__B, -(__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) { return (__m512)__builtin_ia32_vfmsubaddps512_mask3( (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfmaddsubps512_maskz( (__v16sf)__A, (__v16sf)__B, -(__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfnmaddpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfnmaddpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) { return (__m512d)__builtin_ia32_vfnmaddpd512_mask3((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfnmaddpd512_maskz((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfnmaddps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfnmaddps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) { return (__m512)__builtin_ia32_vfnmaddps512_mask3((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfnmaddps512_maskz((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfnmsubpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_fnmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfnmsubpd512_mask((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) { return (__m512d)__builtin_ia32_vfnmsubpd512_mask3((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) { return (__m512d)__builtin_ia32_vfnmsubpd512_maskz((__v8df)__A, (__v8df)__B, (__v8df)__C, (__mmask8)__U, 
_MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfnmsubps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_fnmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfnmsubps512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask3_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) { return (__m512)__builtin_ia32_vfnmsubps512_mask3((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) { return (__m512)__builtin_ia32_vfnmsubps512_maskz((__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m256i _mm512_cvttpd_epi32(__m512d __A) { return (__m256i)__builtin_ia32_cvttpd2dq512_mask( (__v8df)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m256i _mm512_mask_cvttpd_epi32(__m256i __W, __mmask8 __U, __m512d __A) { return (__m256i)__builtin_ia32_cvttpd2dq512_mask( (__v8df)__A, (__v8si)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m256i _mm512_maskz_cvttpd_epi32(__mmask8 __U, __m512d __A) { return (__m256i)__builtin_ia32_cvttpd2dq512_mask( (__v8df)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m256i _mm512_cvttpd_epu32(__m512d __A) { return (__m256i)__builtin_ia32_cvttpd2udq512_mask( (__v8df)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m256i _mm512_mask_cvttpd_epu32(__m256i __W, __mmask8 __U, __m512d __A) { return (__m256i)__builtin_ia32_cvttpd2udq512_mask( (__v8df)__A, (__v8si)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m256i _mm512_maskz_cvttpd_epu32(__mmask8 __U, __m512d __A) { return (__m256i)__builtin_ia32_cvttpd2udq512_mask( (__v8df)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m256i _mm512_cvtpd_epi32(__m512d __A) { return (__m256i)__builtin_ia32_cvtpd2dq512_mask( (__v8df)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m256i _mm512_mask_cvtpd_epi32(__m256i __W, __mmask8 __U, __m512d __A) { return (__m256i)__builtin_ia32_cvtpd2dq512_mask( (__v8df)__A, (__v8si)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m256i _mm512_maskz_cvtpd_epi32(__mmask8 __U, __m512d __A) { return (__m256i)__builtin_ia32_cvtpd2dq512_mask( (__v8df)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m256i _mm512_cvtpd_epu32(__m512d __A) { return (__m256i)__builtin_ia32_cvtpd2udq512_mask( (__v8df)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m256i _mm512_mask_cvtpd_epu32(__m256i __W, __mmask8 __U, __m512d __A) { return (__m256i)__builtin_ia32_cvtpd2udq512_mask( (__v8df)__A, (__v8si)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m256i _mm512_maskz_cvtpd_epu32(__mmask8 __U, __m512d __A) { return (__m256i)__builtin_ia32_cvtpd2udq512_mask( (__v8df)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_cvttps_epi32(__m512 __A) { return (__m512i)__builtin_ia32_cvttps2dq512_mask( (__v16sf)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } 
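/* Illustrative note: the cvtt* conversions truncate toward zero,
   whereas the plain cvt* forms round according to MXCSR
   (_MM_FROUND_CUR_DIRECTION).  For example, a hypothetical

     __m512i q = _mm512_cvttps_epi32(v);

   chops sixteen floats toward zero; values outside the int32 range
   yield the integer indefinite 0x80000000. */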
__funline __m512i _mm512_mask_cvttps_epi32(__m512i __W, __mmask16 __U, __m512 __A) { return (__m512i)__builtin_ia32_cvttps2dq512_mask( (__v16sf)__A, (__v16si)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_maskz_cvttps_epi32(__mmask16 __U, __m512 __A) { return (__m512i)__builtin_ia32_cvttps2dq512_mask( (__v16sf)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_cvttps_epu32(__m512 __A) { return (__m512i)__builtin_ia32_cvttps2udq512_mask( (__v16sf)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_mask_cvttps_epu32(__m512i __W, __mmask16 __U, __m512 __A) { return (__m512i)__builtin_ia32_cvttps2udq512_mask( (__v16sf)__A, (__v16si)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_maskz_cvttps_epu32(__mmask16 __U, __m512 __A) { return (__m512i)__builtin_ia32_cvttps2udq512_mask( (__v16sf)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_cvtps_epi32(__m512 __A) { return (__m512i)__builtin_ia32_cvtps2dq512_mask( (__v16sf)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_mask_cvtps_epi32(__m512i __W, __mmask16 __U, __m512 __A) { return (__m512i)__builtin_ia32_cvtps2dq512_mask( (__v16sf)__A, (__v16si)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_maskz_cvtps_epi32(__mmask16 __U, __m512 __A) { return (__m512i)__builtin_ia32_cvtps2dq512_mask( (__v16sf)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_cvtps_epu32(__m512 __A) { return (__m512i)__builtin_ia32_cvtps2udq512_mask( (__v16sf)__A, (__v16si)_mm512_undefined_epi32(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_mask_cvtps_epu32(__m512i __W, __mmask16 __U, __m512 __A) { return (__m512i)__builtin_ia32_cvtps2udq512_mask( (__v16sf)__A, (__v16si)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512i _mm512_maskz_cvtps_epu32(__mmask16 __U, __m512 __A) { return (__m512i)__builtin_ia32_cvtps2udq512_mask( (__v16sf)__A, (__v16si)_mm512_setzero_si512(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline double _mm512_cvtsd_f64(__m512d __A) { return __A[0]; } __funline float _mm512_cvtss_f32(__m512 __A) { return __A[0]; } #ifdef __x86_64__ __funline __m128 _mm_cvtu64_ss(__m128 __A, unsigned long long __B) { return (__m128)__builtin_ia32_cvtusi2ss64((__v4sf)__A, __B, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_cvtu64_sd(__m128d __A, unsigned long long __B) { return (__m128d)__builtin_ia32_cvtusi2sd64((__v2df)__A, __B, _MM_FROUND_CUR_DIRECTION); } #endif __funline __m128 _mm_cvtu32_ss(__m128 __A, unsigned __B) { return (__m128)__builtin_ia32_cvtusi2ss32((__v4sf)__A, __B, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_cvtepi32_ps(__m512i __A) { return (__m512)__builtin_ia32_cvtdq2ps512_mask( (__v16si)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_cvtepi32_ps(__m512 __W, __mmask16 __U, __m512i __A) { return (__m512)__builtin_ia32_cvtdq2ps512_mask( (__v16si)__A, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_cvtepi32_ps(__mmask16 __U, __m512i __A) { return (__m512)__builtin_ia32_cvtdq2ps512_mask( (__v16si)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_cvtepu32_ps(__m512i __A) { return 
(__m512)__builtin_ia32_cvtudq2ps512_mask( (__v16si)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_cvtepu32_ps(__m512 __W, __mmask16 __U, __m512i __A) { return (__m512)__builtin_ia32_cvtudq2ps512_mask( (__v16si)__A, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_cvtepu32_ps(__mmask16 __U, __m512i __A) { return (__m512)__builtin_ia32_cvtudq2ps512_mask( (__v16si)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } #ifdef __OPTIMIZE__ __funline __m512d _mm512_fixupimm_pd(__m512d __A, __m512d __B, __m512i __C, const int __imm) { return (__m512d)__builtin_ia32_fixupimmpd512_mask( (__v8df)__A, (__v8df)__B, (__v8di)__C, __imm, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_fixupimm_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512i __C, const int __imm) { return (__m512d)__builtin_ia32_fixupimmpd512_mask( (__v8df)__A, (__v8df)__B, (__v8di)__C, __imm, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_fixupimm_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512i __C, const int __imm) { return (__m512d)__builtin_ia32_fixupimmpd512_maskz( (__v8df)__A, (__v8df)__B, (__v8di)__C, __imm, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_fixupimm_ps(__m512 __A, __m512 __B, __m512i __C, const int __imm) { return (__m512)__builtin_ia32_fixupimmps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16si)__C, __imm, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_fixupimm_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512i __C, const int __imm) { return (__m512)__builtin_ia32_fixupimmps512_mask( (__v16sf)__A, (__v16sf)__B, (__v16si)__C, __imm, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_fixupimm_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512i __C, const int __imm) { return (__m512)__builtin_ia32_fixupimmps512_maskz( (__v16sf)__A, (__v16sf)__B, (__v16si)__C, __imm, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_fixupimm_sd(__m128d __A, __m128d __B, __m128i __C, const int __imm) { return (__m128d)__builtin_ia32_fixupimmsd_mask( (__v2df)__A, (__v2df)__B, (__v2di)__C, __imm, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask_fixupimm_sd(__m128d __A, __mmask8 __U, __m128d __B, __m128i __C, const int __imm) { return (__m128d)__builtin_ia32_fixupimmsd_mask( (__v2df)__A, (__v2df)__B, (__v2di)__C, __imm, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_maskz_fixupimm_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128i __C, const int __imm) { return (__m128d)__builtin_ia32_fixupimmsd_maskz( (__v2df)__A, (__v2df)__B, (__v2di)__C, __imm, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_fixupimm_ss(__m128 __A, __m128 __B, __m128i __C, const int __imm) { return (__m128)__builtin_ia32_fixupimmss_mask( (__v4sf)__A, (__v4sf)__B, (__v4si)__C, __imm, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask_fixupimm_ss(__m128 __A, __mmask8 __U, __m128 __B, __m128i __C, const int __imm) { return (__m128)__builtin_ia32_fixupimmss_mask( (__v4sf)__A, (__v4sf)__B, (__v4si)__C, __imm, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_maskz_fixupimm_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128i __C, const int __imm) { return (__m128)__builtin_ia32_fixupimmss_maskz( (__v4sf)__A, (__v4sf)__B, (__v4si)__C, __imm, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } #else #define _mm512_fixupimm_pd(X, Y, Z, C) 
\ ((__m512d)__builtin_ia32_fixupimmpd512_mask( \ (__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (__v8di)(__m512i)(Z), \ (int)(C), (__mmask8)(-1), _MM_FROUND_CUR_DIRECTION)) #define _mm512_mask_fixupimm_pd(X, U, Y, Z, C) \ ((__m512d)__builtin_ia32_fixupimmpd512_mask( \ (__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (__v8di)(__m512i)(Z), \ (int)(C), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm512_maskz_fixupimm_pd(U, X, Y, Z, C) \ ((__m512d)__builtin_ia32_fixupimmpd512_maskz( \ (__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (__v8di)(__m512i)(Z), \ (int)(C), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm512_fixupimm_ps(X, Y, Z, C) \ ((__m512)__builtin_ia32_fixupimmps512_mask( \ (__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (__v16si)(__m512i)(Z), \ (int)(C), (__mmask16)(-1), _MM_FROUND_CUR_DIRECTION)) #define _mm512_mask_fixupimm_ps(X, U, Y, Z, C) \ ((__m512)__builtin_ia32_fixupimmps512_mask( \ (__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (__v16si)(__m512i)(Z), \ (int)(C), (__mmask16)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm512_maskz_fixupimm_ps(U, X, Y, Z, C) \ ((__m512)__builtin_ia32_fixupimmps512_maskz( \ (__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (__v16si)(__m512i)(Z), \ (int)(C), (__mmask16)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm_fixupimm_sd(X, Y, Z, C) \ ((__m128d)__builtin_ia32_fixupimmsd_mask( \ (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), \ (int)(C), (__mmask8)(-1), _MM_FROUND_CUR_DIRECTION)) #define _mm_mask_fixupimm_sd(X, U, Y, Z, C) \ ((__m128d)__builtin_ia32_fixupimmsd_mask( \ (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), \ (int)(C), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm_maskz_fixupimm_sd(U, X, Y, Z, C) \ ((__m128d)__builtin_ia32_fixupimmsd_maskz( \ (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), \ (int)(C), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm_fixupimm_ss(X, Y, Z, C) \ ((__m128)__builtin_ia32_fixupimmss_mask( \ (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), \ (int)(C), (__mmask8)(-1), _MM_FROUND_CUR_DIRECTION)) #define _mm_mask_fixupimm_ss(X, U, Y, Z, C) \ ((__m128)__builtin_ia32_fixupimmss_mask( \ (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), \ (int)(C), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm_maskz_fixupimm_ss(U, X, Y, Z, C) \ ((__m128)__builtin_ia32_fixupimmss_maskz( \ (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), \ (int)(C), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #endif #ifdef __x86_64__ __funline unsigned long long _mm_cvtss_u64(__m128 __A) { return (unsigned long long)__builtin_ia32_vcvtss2usi64( (__v4sf)__A, _MM_FROUND_CUR_DIRECTION); } __funline unsigned long long _mm_cvttss_u64(__m128 __A) { return (unsigned long long)__builtin_ia32_vcvttss2usi64( (__v4sf)__A, _MM_FROUND_CUR_DIRECTION); } __funline long long _mm_cvttss_i64(__m128 __A) { return (long long)__builtin_ia32_vcvttss2si64((__v4sf)__A, _MM_FROUND_CUR_DIRECTION); } #endif /* __x86_64__ */ __funline unsigned _mm_cvtss_u32(__m128 __A) { return (unsigned)__builtin_ia32_vcvtss2usi32((__v4sf)__A, _MM_FROUND_CUR_DIRECTION); } __funline unsigned _mm_cvttss_u32(__m128 __A) { return (unsigned)__builtin_ia32_vcvttss2usi32((__v4sf)__A, _MM_FROUND_CUR_DIRECTION); } __funline int _mm_cvttss_i32(__m128 __A) { return (int)__builtin_ia32_vcvttss2si32((__v4sf)__A, _MM_FROUND_CUR_DIRECTION); } #ifdef __x86_64__ __funline unsigned long long _mm_cvtsd_u64(__m128d __A) { return (unsigned long long)__builtin_ia32_vcvtsd2usi64( (__v2df)__A, 
_MM_FROUND_CUR_DIRECTION); } __funline unsigned long long _mm_cvttsd_u64(__m128d __A) { return (unsigned long long)__builtin_ia32_vcvttsd2usi64( (__v2df)__A, _MM_FROUND_CUR_DIRECTION); } __funline long long _mm_cvttsd_i64(__m128d __A) { return (long long)__builtin_ia32_vcvttsd2si64((__v2df)__A, _MM_FROUND_CUR_DIRECTION); } #endif /* __x86_64__ */ __funline unsigned _mm_cvtsd_u32(__m128d __A) { return (unsigned)__builtin_ia32_vcvtsd2usi32((__v2df)__A, _MM_FROUND_CUR_DIRECTION); } __funline unsigned _mm_cvttsd_u32(__m128d __A) { return (unsigned)__builtin_ia32_vcvttsd2usi32((__v2df)__A, _MM_FROUND_CUR_DIRECTION); } __funline int _mm_cvttsd_i32(__m128d __A) { return (int)__builtin_ia32_vcvttsd2si32((__v2df)__A, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_cvtps_pd(__m256 __A) { return (__m512d)__builtin_ia32_cvtps2pd512_mask( (__v8sf)__A, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_cvtps_pd(__m512d __W, __mmask8 __U, __m256 __A) { return (__m512d)__builtin_ia32_cvtps2pd512_mask( (__v8sf)__A, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_cvtps_pd(__mmask8 __U, __m256 __A) { return (__m512d)__builtin_ia32_cvtps2pd512_mask( (__v8sf)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_cvtph_ps(__m256i __A) { return (__m512)__builtin_ia32_vcvtph2ps512_mask( (__v16hi)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_cvtph_ps(__m512 __W, __mmask16 __U, __m256i __A) { return (__m512)__builtin_ia32_vcvtph2ps512_mask( (__v16hi)__A, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_cvtph_ps(__mmask16 __U, __m256i __A) { return (__m512)__builtin_ia32_vcvtph2ps512_mask( (__v16hi)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m256 _mm512_cvtpd_ps(__m512d __A) { return (__m256)__builtin_ia32_cvtpd2ps512_mask( (__v8df)__A, (__v8sf)_mm256_undefined_ps(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m256 _mm512_mask_cvtpd_ps(__m256 __W, __mmask8 __U, __m512d __A) { return (__m256)__builtin_ia32_cvtpd2ps512_mask( (__v8df)__A, (__v8sf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m256 _mm512_maskz_cvtpd_ps(__mmask8 __U, __m512d __A) { return (__m256)__builtin_ia32_cvtpd2ps512_mask( (__v8df)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } #ifdef __OPTIMIZE__ __funline __m512 _mm512_getexp_ps(__m512 __A) { return (__m512)__builtin_ia32_getexpps512_mask( (__v16sf)__A, (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_getexp_ps(__m512 __W, __mmask16 __U, __m512 __A) { return (__m512)__builtin_ia32_getexpps512_mask( (__v16sf)__A, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_getexp_ps(__mmask16 __U, __m512 __A) { return (__m512)__builtin_ia32_getexpps512_mask( (__v16sf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_getexp_pd(__m512d __A) { return (__m512d)__builtin_ia32_getexppd512_mask( (__v8df)__A, (__v8df)_mm512_undefined_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_getexp_pd(__m512d __W, __mmask8 __U, __m512d __A) { return (__m512d)__builtin_ia32_getexppd512_mask( (__v8df)__A, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d 
_mm512_maskz_getexp_pd(__mmask8 __U, __m512d __A) { return (__m512d)__builtin_ia32_getexppd512_mask( (__v8df)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_getexp_ss(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_getexpss128_round((__v4sf)__A, (__v4sf)__B, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask_getexp_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_getexpss_mask_round((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_maskz_getexp_ss(__mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_getexpss_mask_round( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_getexp_sd(__m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_getexpsd128_round((__v2df)__A, (__v2df)__B, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask_getexp_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_getexpsd_mask_round((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_maskz_getexp_sd(__mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_getexpsd_mask_round( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_getmant_pd(__m512d __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) { return (__m512d)__builtin_ia32_getmantpd512_mask( (__v8df)__A, (__C << 2) | __B, _mm512_undefined_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_getmant_pd(__m512d __W, __mmask8 __U, __m512d __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) { return (__m512d)__builtin_ia32_getmantpd512_mask( (__v8df)__A, (__C << 2) | __B, (__v8df)__W, __U, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_getmant_pd(__mmask8 __U, __m512d __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) { return (__m512d)__builtin_ia32_getmantpd512_mask( (__v8df)__A, (__C << 2) | __B, (__v8df)_mm512_setzero_pd(), __U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_getmant_ps(__m512 __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) { return (__m512)__builtin_ia32_getmantps512_mask( (__v16sf)__A, (__C << 2) | __B, _mm512_undefined_ps(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_getmant_ps(__m512 __W, __mmask16 __U, __m512 __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) { return (__m512)__builtin_ia32_getmantps512_mask( (__v16sf)__A, (__C << 2) | __B, (__v16sf)__W, __U, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_getmant_ps(__mmask16 __U, __m512 __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) { return (__m512)__builtin_ia32_getmantps512_mask( (__v16sf)__A, (__C << 2) | __B, (__v16sf)_mm512_setzero_ps(), __U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_getmant_sd(__m128d __A, __m128d __B, _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D) { return (__m128d)__builtin_ia32_getmantsd_round( (__v2df)__A, (__v2df)__B, (__D << 2) | __C, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_mask_getmant_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D) { return (__m128d)__builtin_ia32_getmantsd_mask_round( (__v2df)__A, (__v2df)__B, (__D << 2) | __C, (__v2df)__W, __U, _MM_FROUND_CUR_DIRECTION); } __funline __m128d 
_mm_maskz_getmant_sd(__mmask8 __U, __m128d __A, __m128d __B, _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D) { return (__m128d)__builtin_ia32_getmantsd_mask_round( (__v2df)__A, (__v2df)__B, (__D << 2) | __C, (__v2df)_mm_setzero_pd(), __U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_getmant_ss(__m128 __A, __m128 __B, _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D) { return (__m128)__builtin_ia32_getmantss_round( (__v4sf)__A, (__v4sf)__B, (__D << 2) | __C, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_mask_getmant_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D) { return (__m128)__builtin_ia32_getmantss_mask_round( (__v4sf)__A, (__v4sf)__B, (__D << 2) | __C, (__v4sf)__W, __U, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_maskz_getmant_ss(__mmask8 __U, __m128 __A, __m128 __B, _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D) { return (__m128)__builtin_ia32_getmantss_mask_round( (__v4sf)__A, (__v4sf)__B, (__D << 2) | __C, (__v4sf)_mm_setzero_ps(), __U, _MM_FROUND_CUR_DIRECTION); } #else #define _mm512_getmant_pd(X, B, C) \ ((__m512d)__builtin_ia32_getmantpd512_mask( \ (__v8df)(__m512d)(X), (int)(((C) << 2) | (B)), \ (__v8df)_mm512_undefined_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION)) #define _mm512_mask_getmant_pd(W, U, X, B, C) \ ((__m512d)__builtin_ia32_getmantpd512_mask( \ (__v8df)(__m512d)(X), (int)(((C) << 2) | (B)), (__v8df)(__m512d)(W), \ (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm512_maskz_getmant_pd(U, X, B, C) \ ((__m512d)__builtin_ia32_getmantpd512_mask( \ (__v8df)(__m512d)(X), (int)(((C) << 2) | (B)), \ (__v8df)_mm512_setzero_pd(), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm512_getmant_ps(X, B, C) \ ((__m512)__builtin_ia32_getmantps512_mask( \ (__v16sf)(__m512)(X), (int)(((C) << 2) | (B)), \ (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, \ _MM_FROUND_CUR_DIRECTION)) #define _mm512_mask_getmant_ps(W, U, X, B, C) \ ((__m512)__builtin_ia32_getmantps512_mask( \ (__v16sf)(__m512)(X), (int)(((C) << 2) | (B)), (__v16sf)(__m512)(W), \ (__mmask16)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm512_maskz_getmant_ps(U, X, B, C) \ ((__m512)__builtin_ia32_getmantps512_mask( \ (__v16sf)(__m512)(X), (int)(((C) << 2) | (B)), \ (__v16sf)_mm512_setzero_ps(), (__mmask16)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm_getmant_sd(X, Y, C, D) \ ((__m128d)__builtin_ia32_getmantsd_round( \ (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (int)(((D) << 2) | (C)), \ _MM_FROUND_CUR_DIRECTION)) #define _mm_mask_getmant_sd(W, U, X, Y, C, D) \ ((__m128d)__builtin_ia32_getmantsd_mask_round( \ (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (int)(((D) << 2) | (C)), \ (__v2df)(__m128d)(W), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm_maskz_getmant_sd(U, X, Y, C, D) \ ((__m128d)__builtin_ia32_getmantsd_mask_round( \ (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (int)(((D) << 2) | (C)), \ (__v2df)_mm_setzero_pd(), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm_getmant_ss(X, Y, C, D) \ ((__m128)__builtin_ia32_getmantss_round( \ (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (int)(((D) << 2) | (C)), \ _MM_FROUND_CUR_DIRECTION)) #define _mm_mask_getmant_ss(W, U, X, Y, C, D) \ ((__m128)__builtin_ia32_getmantss_mask_round( \ (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (int)(((D) << 2) | (C)), \ (__v4sf)(__m128)(W), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm_maskz_getmant_ss(U, X, Y, C, D) \ ((__m128)__builtin_ia32_getmantss_mask_round( \ (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (int)(((D) << 2) | (C)), 
\ (__v4sf)_mm_setzero_ps(), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) #define _mm_getexp_ss(A, B) \ ((__m128)__builtin_ia32_getexpss128_round( \ (__v4sf)(__m128)(A), (__v4sf)(__m128)(B), _MM_FROUND_CUR_DIRECTION)) #define _mm_mask_getexp_ss(W, U, A, B) \ (__m128) \ __builtin_ia32_getexpss_mask_round(A, B, W, U, _MM_FROUND_CUR_DIRECTION) #define _mm_maskz_getexp_ss(U, A, B) \ (__m128) __builtin_ia32_getexpss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), \ U, _MM_FROUND_CUR_DIRECTION) #define _mm_getexp_sd(A, B) \ ((__m128d)__builtin_ia32_getexpsd128_round( \ (__v2df)(__m128d)(A), (__v2df)(__m128d)(B), _MM_FROUND_CUR_DIRECTION)) #define _mm_mask_getexp_sd(W, U, A, B) \ (__m128d) \ __builtin_ia32_getexpsd_mask_round(A, B, W, U, _MM_FROUND_CUR_DIRECTION) #define _mm_maskz_getexp_sd(U, A, B) \ (__m128d) __builtin_ia32_getexpsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), \ U, _MM_FROUND_CUR_DIRECTION) #define _mm512_getexp_ps(A) \ ((__m512)__builtin_ia32_getexpps512_mask( \ (__v16sf)(__m512)(A), (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, \ _MM_FROUND_CUR_DIRECTION)) #define _mm512_mask_getexp_ps(W, U, A) \ ((__m512)__builtin_ia32_getexpps512_mask( \ (__v16sf)(__m512)(A), (__v16sf)(__m512)(W), (__mmask16)(U), \ _MM_FROUND_CUR_DIRECTION)) #define _mm512_maskz_getexp_ps(U, A) \ ((__m512)__builtin_ia32_getexpps512_mask( \ (__v16sf)(__m512)(A), (__v16sf)_mm512_setzero_ps(), (__mmask16)(U), \ _MM_FROUND_CUR_DIRECTION)) #define _mm512_getexp_pd(A) \ ((__m512d)__builtin_ia32_getexppd512_mask( \ (__v8df)(__m512d)(A), (__v8df)_mm512_undefined_pd(), (__mmask8)-1, \ _MM_FROUND_CUR_DIRECTION)) #define _mm512_mask_getexp_pd(W, U, A) \ ((__m512d)__builtin_ia32_getexppd512_mask( \ (__v8df)(__m512d)(A), (__v8df)(__m512d)(W), (__mmask8)(U), \ _MM_FROUND_CUR_DIRECTION)) #define _mm512_maskz_getexp_pd(U, A) \ ((__m512d)__builtin_ia32_getexppd512_mask( \ (__v8df)(__m512d)(A), (__v8df)_mm512_setzero_pd(), (__mmask8)(U), \ _MM_FROUND_CUR_DIRECTION)) #endif #ifdef __OPTIMIZE__ __funline __m512 _mm512_roundscale_ps(__m512 __A, const int __imm) { return (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)__A, __imm, (__v16sf)_mm512_undefined_ps(), -1, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_mask_roundscale_ps(__m512 __A, __mmask16 __B, __m512 __C, const int __imm) { return (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)__C, __imm, (__v16sf)__A, (__mmask16)__B, _MM_FROUND_CUR_DIRECTION); } __funline __m512 _mm512_maskz_roundscale_ps(__mmask16 __A, __m512 __B, const int __imm) { return (__m512)__builtin_ia32_rndscaleps_mask( (__v16sf)__B, __imm, (__v16sf)_mm512_setzero_ps(), (__mmask16)__A, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_roundscale_pd(__m512d __A, const int __imm) { return (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)__A, __imm, (__v8df)_mm512_undefined_pd(), -1, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_mask_roundscale_pd(__m512d __A, __mmask8 __B, __m512d __C, const int __imm) { return (__m512d)__builtin_ia32_rndscalepd_mask( (__v8df)__C, __imm, (__v8df)__A, (__mmask8)__B, _MM_FROUND_CUR_DIRECTION); } __funline __m512d _mm512_maskz_roundscale_pd(__mmask8 __A, __m512d __B, const int __imm) { return (__m512d)__builtin_ia32_rndscalepd_mask( (__v8df)__B, __imm, (__v8df)_mm512_setzero_pd(), (__mmask8)__A, _MM_FROUND_CUR_DIRECTION); } __funline __m128 _mm_roundscale_ss(__m128 __A, __m128 __B, const int __imm) { return (__m128)__builtin_ia32_rndscaless_round( (__v4sf)__A, (__v4sf)__B, __imm, _MM_FROUND_CUR_DIRECTION); } __funline __m128d _mm_roundscale_sd(__m128d __A, __m128d __B, 
const int __imm) { return (__m128d)__builtin_ia32_rndscalesd_round( (__v2df)__A, (__v2df)__B, __imm, _MM_FROUND_CUR_DIRECTION); } #else #define _mm512_roundscale_ps(A, B) \ ((__m512)__builtin_ia32_rndscaleps_mask( \ (__v16sf)(__m512)(A), (int)(B), (__v16sf)_mm512_undefined_ps(), \ (__mmask16)(-1), _MM_FROUND_CUR_DIRECTION)) #define _mm512_mask_roundscale_ps(A, B, C, D) \ ((__m512)__builtin_ia32_rndscaleps_mask( \ (__v16sf)(__m512)(C), (int)(D), (__v16sf)(__m512)(A), (__mmask16)(B), \ _MM_FROUND_CUR_DIRECTION)) #define _mm512_maskz_roundscale_ps(A, B, C) \ ((__m512)__builtin_ia32_rndscaleps_mask( \ (__v16sf)(__m512)(B), (int)(C), (__v16sf)_mm512_setzero_ps(), \ (__mmask16)(A), _MM_FROUND_CUR_DIRECTION)) #define _mm512_roundscale_pd(A, B) \ ((__m512d)__builtin_ia32_rndscalepd_mask( \ (__v8df)(__m512d)(A), (int)(B), (__v8df)_mm512_undefined_pd(), \ (__mmask8)(-1), _MM_FROUND_CUR_DIRECTION)) #define _mm512_mask_roundscale_pd(A, B, C, D) \ ((__m512d)__builtin_ia32_rndscalepd_mask( \ (__v8df)(__m512d)(C), (int)(D), (__v8df)(__m512d)(A), (__mmask8)(B), \ _MM_FROUND_CUR_DIRECTION)) #define _mm512_maskz_roundscale_pd(A, B, C) \ ((__m512d)__builtin_ia32_rndscalepd_mask( \ (__v8df)(__m512d)(B), (int)(C), (__v8df)_mm512_setzero_pd(), \ (__mmask8)(A), _MM_FROUND_CUR_DIRECTION)) #define _mm_roundscale_ss(A, B, C) \ ((__m128)__builtin_ia32_rndscaless_round((__v4sf)(__m128)(A), \ (__v4sf)(__m128)(B), (int)(C), \ _MM_FROUND_CUR_DIRECTION)) #define _mm_roundscale_sd(A, B, C) \ ((__m128d)__builtin_ia32_rndscalesd_round((__v2df)(__m128d)(A), \ (__v2df)(__m128d)(B), (int)(C), \ _MM_FROUND_CUR_DIRECTION)) #endif #ifdef __OPTIMIZE__ __funline __mmask8 _mm512_cmp_pd_mask(__m512d __X, __m512d __Y, const int __P) { return (__mmask8)__builtin_ia32_cmppd512_mask( (__v8df)__X, (__v8df)__Y, __P, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_cmp_ps_mask(__m512 __X, __m512 __Y, const int __P) { return (__mmask16)__builtin_ia32_cmpps512_mask( (__v16sf)__X, (__v16sf)__Y, __P, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_mask_cmp_ps_mask(__mmask16 __U, __m512 __X, __m512 __Y, const int __P) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, __P, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_mask_cmp_pd_mask(__mmask8 __U, __m512d __X, __m512d __Y, const int __P) { return (__mmask8)__builtin_ia32_cmppd512_mask( (__v8df)__X, (__v8df)__Y, __P, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_cmpeq_pd_mask(__m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_EQ_OQ, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_mask_cmpeq_pd_mask(__mmask8 __U, __m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_EQ_OQ, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_cmplt_pd_mask(__m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_LT_OS, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_mask_cmplt_pd_mask(__mmask8 __U, __m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_LT_OS, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_cmple_pd_mask(__m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_LE_OS, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 
_mm512_mask_cmple_pd_mask(__mmask8 __U, __m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_LE_OS, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_cmpunord_pd_mask(__m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_UNORD_Q, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_mask_cmpunord_pd_mask(__mmask8 __U, __m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_UNORD_Q, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_cmpneq_pd_mask(__m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_NEQ_UQ, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_mask_cmpneq_pd_mask(__mmask8 __U, __m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_NEQ_UQ, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_cmpnlt_pd_mask(__m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_NLT_US, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_mask_cmpnlt_pd_mask(__mmask8 __U, __m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_NLT_US, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_cmpnle_pd_mask(__m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_NLE_US, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_mask_cmpnle_pd_mask(__mmask8 __U, __m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_NLE_US, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_cmpord_pd_mask(__m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_ORD_Q, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm512_mask_cmpord_pd_mask(__mmask8 __U, __m512d __X, __m512d __Y) { return (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)__X, (__v8df)__Y, _CMP_ORD_Q, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_cmpeq_ps_mask(__m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_EQ_OQ, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_mask_cmpeq_ps_mask(__mmask16 __U, __m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_EQ_OQ, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_cmplt_ps_mask(__m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_LT_OS, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_mask_cmplt_ps_mask(__mmask16 __U, __m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_LT_OS, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_cmple_ps_mask(__m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_LE_OS, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_mask_cmple_ps_mask(__mmask16 __U, __m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_LE_OS, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 
_mm512_cmpunord_ps_mask(__m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_UNORD_Q, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_mask_cmpunord_ps_mask(__mmask16 __U, __m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_UNORD_Q, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_cmpneq_ps_mask(__m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_NEQ_UQ, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_mask_cmpneq_ps_mask(__mmask16 __U, __m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_NEQ_UQ, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_cmpnlt_ps_mask(__m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_NLT_US, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_mask_cmpnlt_ps_mask(__mmask16 __U, __m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_NLT_US, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_cmpnle_ps_mask(__m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_NLE_US, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_mask_cmpnle_ps_mask(__mmask16 __U, __m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_NLE_US, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_cmpord_ps_mask(__m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_ORD_Q, (__mmask16)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask16 _mm512_mask_cmpord_ps_mask(__mmask16 __U, __m512 __X, __m512 __Y) { return (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)__X, (__v16sf)__Y, _CMP_ORD_Q, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm_cmp_sd_mask(__m128d __X, __m128d __Y, const int __P) { return (__mmask8)__builtin_ia32_cmpsd_mask( (__v2df)__X, (__v2df)__Y, __P, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm_mask_cmp_sd_mask(__mmask8 __M, __m128d __X, __m128d __Y, const int __P) { return (__mmask8)__builtin_ia32_cmpsd_mask( (__v2df)__X, (__v2df)__Y, __P, (__mmask8)__M, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm_cmp_ss_mask(__m128 __X, __m128 __Y, const int __P) { return (__mmask8)__builtin_ia32_cmpss_mask( (__v4sf)__X, (__v4sf)__Y, __P, (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); } __funline __mmask8 _mm_mask_cmp_ss_mask(__mmask8 __M, __m128 __X, __m128 __Y, const int __P) { return (__mmask8)__builtin_ia32_cmpss_mask( (__v4sf)__X, (__v4sf)__Y, __P, (__mmask8)__M, _MM_FROUND_CUR_DIRECTION); } #else #define _mm512_cmp_pd_mask(X, Y, P) \ ((__mmask8)__builtin_ia32_cmppd512_mask( \ (__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (int)(P), (__mmask8)-1, \ _MM_FROUND_CUR_DIRECTION)) #define _mm512_cmp_ps_mask(X, Y, P) \ ((__mmask16)__builtin_ia32_cmpps512_mask( \ (__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (int)(P), (__mmask16)-1, \ _MM_FROUND_CUR_DIRECTION)) #define _mm512_mask_cmp_pd_mask(M, X, Y, P) \ ((__mmask8)__builtin_ia32_cmppd512_mask( \ (__v8df)(__m512d)(X), (__v8df)(__m512d)(Y), (int)(P), (__mmask8)M, \ _MM_FROUND_CUR_DIRECTION)) #define _mm512_mask_cmp_ps_mask(M, X, Y, P) \ ((__mmask16)__builtin_ia32_cmpps512_mask( \ 
(__v16sf)(__m512)(X), (__v16sf)(__m512)(Y), (int)(P), (__mmask16)M, \ _MM_FROUND_CUR_DIRECTION)) #define _mm_cmp_sd_mask(X, Y, P) \ ((__mmask8)__builtin_ia32_cmpsd_mask( \ (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (int)(P), (__mmask8)-1, \ _MM_FROUND_CUR_DIRECTION)) #define _mm_mask_cmp_sd_mask(M, X, Y, P) \ ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \ (__v2df)(__m128d)(Y), (int)(P), M, \ _MM_FROUND_CUR_DIRECTION)) #define _mm_cmp_ss_mask(X, Y, P) \ ((__mmask8)__builtin_ia32_cmpss_mask( \ (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (int)(P), (__mmask8)-1, \ _MM_FROUND_CUR_DIRECTION)) #define _mm_mask_cmp_ss_mask(M, X, Y, P) \ ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \ (__v4sf)(__m128)(Y), (int)(P), M, \ _MM_FROUND_CUR_DIRECTION)) #endif __funline __mmask16 _mm512_kmov(__mmask16 __A) { return __builtin_ia32_kmovw(__A); } __funline __m512 _mm512_castpd_ps(__m512d __A) { return (__m512)(__A); } __funline __m512i _mm512_castpd_si512(__m512d __A) { return (__m512i)(__A); } __funline __m512d _mm512_castps_pd(__m512 __A) { return (__m512d)(__A); } __funline __m512i _mm512_castps_si512(__m512 __A) { return (__m512i)(__A); } __funline __m512 _mm512_castsi512_ps(__m512i __A) { return (__m512)(__A); } __funline __m512d _mm512_castsi512_pd(__m512i __A) { return (__m512d)(__A); } __funline __m128d _mm512_castpd512_pd128(__m512d __A) { return (__m128d)_mm512_extractf32x4_ps((__m512)__A, 0); } __funline __m128 _mm512_castps512_ps128(__m512 __A) { return _mm512_extractf32x4_ps(__A, 0); } __funline __m128i _mm512_castsi512_si128(__m512i __A) { return (__m128i)_mm512_extracti32x4_epi32((__m512i)__A, 0); } __funline __m256d _mm512_castpd512_pd256(__m512d __A) { return _mm512_extractf64x4_pd(__A, 0); } __funline __m256 _mm512_castps512_ps256(__m512 __A) { return (__m256)_mm512_extractf64x4_pd((__m512d)__A, 0); } __funline __m256i _mm512_castsi512_si256(__m512i __A) { return (__m256i)_mm512_extractf64x4_pd((__m512d)__A, 0); } __funline __m512d _mm512_castpd128_pd512(__m128d __A) { return (__m512d)__builtin_ia32_pd512_pd((__m128d)__A); } __funline __m512 _mm512_castps128_ps512(__m128 __A) { return (__m512)__builtin_ia32_ps512_ps((__m128)__A); } __funline __m512i _mm512_castsi128_si512(__m128i __A) { return (__m512i)__builtin_ia32_si512_si((__v4si)__A); } __funline __m512d _mm512_castpd256_pd512(__m256d __A) { return __builtin_ia32_pd512_256pd(__A); } __funline __m512 _mm512_castps256_ps512(__m256 __A) { return __builtin_ia32_ps512_256ps(__A); } __funline __m512i _mm512_castsi256_si512(__m256i __A) { return (__m512i)__builtin_ia32_si512_256si((__v8si)__A); } __funline __mmask16 _mm512_cmpeq_epu32_mask(__m512i __A, __m512i __B) { return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__A, (__v16si)__B, 0, (__mmask16)-1); } __funline __mmask16 _mm512_mask_cmpeq_epu32_mask(__mmask16 __U, __m512i __A, __m512i __B) { return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__A, (__v16si)__B, 0, __U); } __funline __mmask8 _mm512_mask_cmpeq_epu64_mask(__mmask8 __U, __m512i __A, __m512i __B) { return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__A, (__v8di)__B, 0, __U); } __funline __mmask8 _mm512_cmpeq_epu64_mask(__m512i __A, __m512i __B) { return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__A, (__v8di)__B, 0, (__mmask8)-1); } __funline __mmask16 _mm512_cmpgt_epu32_mask(__m512i __A, __m512i __B) { return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__A, (__v16si)__B, 6, (__mmask16)-1); } __funline __mmask16 _mm512_mask_cmpgt_epu32_mask(__mmask16 __U, __m512i __A, __m512i __B) 
{ return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__A, (__v16si)__B, 6, __U); } __funline __mmask8 _mm512_mask_cmpgt_epu64_mask(__mmask8 __U, __m512i __A, __m512i __B) { return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__A, (__v8di)__B, 6, __U); } __funline __mmask8 _mm512_cmpgt_epu64_mask(__m512i __A, __m512i __B) { return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__A, (__v8di)__B, 6, (__mmask8)-1); } #undef __MM512_REDUCE_OP #define __MM512_REDUCE_OP(op) \ __v8si __T1 = (__v8si)_mm512_extracti64x4_epi64(__A, 1); \ __v8si __T2 = (__v8si)_mm512_extracti64x4_epi64(__A, 0); \ __m256i __T3 = (__m256i)(__T1 op __T2); \ __v4si __T4 = (__v4si)_mm256_extracti128_si256(__T3, 1); \ __v4si __T5 = (__v4si)_mm256_extracti128_si256(__T3, 0); \ __v4si __T6 = __T4 op __T5; \ __v4si __T7 = __builtin_shuffle(__T6, (__v4si){2, 3, 0, 1}); \ __v4si __T8 = __T6 op __T7; \ return __T8[0] op __T8[1] __funline int _mm512_reduce_add_epi32(__m512i __A) { __MM512_REDUCE_OP(+); } __funline int _mm512_reduce_mul_epi32(__m512i __A) { __MM512_REDUCE_OP(*); } __funline int _mm512_reduce_and_epi32(__m512i __A) { __MM512_REDUCE_OP(&); } __funline int _mm512_reduce_or_epi32(__m512i __A) { __MM512_REDUCE_OP(|); } __funline int _mm512_mask_reduce_add_epi32(__mmask16 __U, __m512i __A) { __A = _mm512_maskz_mov_epi32(__U, __A); __MM512_REDUCE_OP(+); } __funline int _mm512_mask_reduce_mul_epi32(__mmask16 __U, __m512i __A) { __A = _mm512_mask_mov_epi32(_mm512_set1_epi32(1), __U, __A); __MM512_REDUCE_OP(*); } __funline int _mm512_mask_reduce_and_epi32(__mmask16 __U, __m512i __A) { __A = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0), __U, __A); __MM512_REDUCE_OP(&); } __funline int _mm512_mask_reduce_or_epi32(__mmask16 __U, __m512i __A) { __A = _mm512_maskz_mov_epi32(__U, __A); __MM512_REDUCE_OP(|); } #undef __MM512_REDUCE_OP #define __MM512_REDUCE_OP(op) \ __m256i __T1 = (__m256i)_mm512_extracti64x4_epi64(__A, 1); \ __m256i __T2 = (__m256i)_mm512_extracti64x4_epi64(__A, 0); \ __m256i __T3 = _mm256_##op(__T1, __T2); \ __m128i __T4 = (__m128i)_mm256_extracti128_si256(__T3, 1); \ __m128i __T5 = (__m128i)_mm256_extracti128_si256(__T3, 0); \ __m128i __T6 = _mm_##op(__T4, __T5); \ __m128i __T7 = \ (__m128i)__builtin_shuffle((__v4si)__T6, (__v4si){2, 3, 0, 1}); \ __m128i __T8 = _mm_##op(__T6, __T7); \ __m128i __T9 = \ (__m128i)__builtin_shuffle((__v4si)__T8, (__v4si){1, 0, 1, 0}); \ __v4si __T10 = (__v4si)_mm_##op(__T8, __T9); \ return __T10[0] __funline int _mm512_reduce_min_epi32(__m512i __A) { __MM512_REDUCE_OP(min_epi32); } __funline int _mm512_reduce_max_epi32(__m512i __A) { __MM512_REDUCE_OP(max_epi32); } __funline unsigned int _mm512_reduce_min_epu32(__m512i __A) { __MM512_REDUCE_OP(min_epu32); } __funline unsigned int _mm512_reduce_max_epu32(__m512i __A) { __MM512_REDUCE_OP(max_epu32); } __funline int _mm512_mask_reduce_min_epi32(__mmask16 __U, __m512i __A) { __A = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __U, __A); __MM512_REDUCE_OP(min_epi32); } __funline int _mm512_mask_reduce_max_epi32(__mmask16 __U, __m512i __A) { __A = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __U, __A); __MM512_REDUCE_OP(max_epi32); } __funline unsigned int _mm512_mask_reduce_min_epu32(__mmask16 __U, __m512i __A) { __A = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0), __U, __A); __MM512_REDUCE_OP(min_epu32); } __funline unsigned int _mm512_mask_reduce_max_epu32(__mmask16 __U, __m512i __A) { __A = _mm512_maskz_mov_epi32(__U, __A); __MM512_REDUCE_OP(max_epu32); } #undef __MM512_REDUCE_OP #define __MM512_REDUCE_OP(op) 
\ __m256 __T1 = (__m256)_mm512_extractf64x4_pd((__m512d)__A, 1); \ __m256 __T2 = (__m256)_mm512_extractf64x4_pd((__m512d)__A, 0); \ __m256 __T3 = __T1 op __T2; \ __m128 __T4 = _mm256_extractf128_ps(__T3, 1); \ __m128 __T5 = _mm256_extractf128_ps(__T3, 0); \ __m128 __T6 = __T4 op __T5; \ __m128 __T7 = __builtin_shuffle(__T6, (__v4si){2, 3, 0, 1}); \ __m128 __T8 = __T6 op __T7; \ return __T8[0] op __T8[1] __funline float _mm512_reduce_add_ps(__m512 __A) { __MM512_REDUCE_OP(+); } __funline float _mm512_reduce_mul_ps(__m512 __A) { __MM512_REDUCE_OP(*); } __funline float _mm512_mask_reduce_add_ps(__mmask16 __U, __m512 __A) { __A = _mm512_maskz_mov_ps(__U, __A); __MM512_REDUCE_OP(+); } __funline float _mm512_mask_reduce_mul_ps(__mmask16 __U, __m512 __A) { __A = _mm512_mask_mov_ps(_mm512_set1_ps(1.0f), __U, __A); __MM512_REDUCE_OP(*); } #undef __MM512_REDUCE_OP #define __MM512_REDUCE_OP(op) \ __m256 __T1 = (__m256)_mm512_extractf64x4_pd((__m512d)__A, 1); \ __m256 __T2 = (__m256)_mm512_extractf64x4_pd((__m512d)__A, 0); \ __m256 __T3 = _mm256_##op(__T1, __T2); \ __m128 __T4 = _mm256_extractf128_ps(__T3, 1); \ __m128 __T5 = _mm256_extractf128_ps(__T3, 0); \ __m128 __T6 = _mm_##op(__T4, __T5); \ __m128 __T7 = __builtin_shuffle(__T6, (__v4si){2, 3, 0, 1}); \ __m128 __T8 = _mm_##op(__T6, __T7); \ __m128 __T9 = __builtin_shuffle(__T8, (__v4si){1, 0, 1, 0}); \ __m128 __T10 = _mm_##op(__T8, __T9); \ return __T10[0] __funline float _mm512_reduce_min_ps(__m512 __A) { __MM512_REDUCE_OP(min_ps); } __funline float _mm512_reduce_max_ps(__m512 __A) { __MM512_REDUCE_OP(max_ps); } __funline float _mm512_mask_reduce_min_ps(__mmask16 __U, __m512 __A) { __A = _mm512_mask_mov_ps(_mm512_set1_ps(__builtin_inff()), __U, __A); __MM512_REDUCE_OP(min_ps); } __funline float _mm512_mask_reduce_max_ps(__mmask16 __U, __m512 __A) { __A = _mm512_mask_mov_ps(_mm512_set1_ps(-__builtin_inff()), __U, __A); __MM512_REDUCE_OP(max_ps); } #undef __MM512_REDUCE_OP #define __MM512_REDUCE_OP(op) \ __v4di __T1 = (__v4di)_mm512_extracti64x4_epi64(__A, 1); \ __v4di __T2 = (__v4di)_mm512_extracti64x4_epi64(__A, 0); \ __m256i __T3 = (__m256i)(__T1 op __T2); \ __v2di __T4 = (__v2di)_mm256_extracti128_si256(__T3, 1); \ __v2di __T5 = (__v2di)_mm256_extracti128_si256(__T3, 0); \ __v2di __T6 = __T4 op __T5; \ return __T6[0] op __T6[1] __funline long long _mm512_reduce_add_epi64(__m512i __A) { __MM512_REDUCE_OP(+); } __funline long long _mm512_reduce_mul_epi64(__m512i __A) { __MM512_REDUCE_OP(*); } __funline long long _mm512_reduce_and_epi64(__m512i __A) { __MM512_REDUCE_OP(&); } __funline long long _mm512_reduce_or_epi64(__m512i __A) { __MM512_REDUCE_OP(|); } __funline long long _mm512_mask_reduce_add_epi64(__mmask8 __U, __m512i __A) { __A = _mm512_maskz_mov_epi64(__U, __A); __MM512_REDUCE_OP(+); } __funline long long _mm512_mask_reduce_mul_epi64(__mmask8 __U, __m512i __A) { __A = _mm512_mask_mov_epi64(_mm512_set1_epi64(1LL), __U, __A); __MM512_REDUCE_OP(*); } __funline long long _mm512_mask_reduce_and_epi64(__mmask8 __U, __m512i __A) { __A = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0LL), __U, __A); __MM512_REDUCE_OP(&); } __funline long long _mm512_mask_reduce_or_epi64(__mmask8 __U, __m512i __A) { __A = _mm512_maskz_mov_epi64(__U, __A); __MM512_REDUCE_OP(|); } #undef __MM512_REDUCE_OP #define __MM512_REDUCE_OP(op) \ __m512i __T1 = _mm512_shuffle_i64x2(__A, __A, 0x4e); \ __m512i __T2 = _mm512_##op(__A, __T1); \ __m512i __T3 = (__m512i)__builtin_shuffle((__v8di)__T2, \ (__v8di){2, 3, 0, 1, 6, 7, 4, 5}); \ __m512i __T4 = _mm512_##op(__T2, __T3); \ 
__m512i __T5 = (__m512i)__builtin_shuffle((__v8di)__T4, \ (__v8di){1, 0, 3, 2, 5, 4, 7, 6}); \ __v8di __T6 = (__v8di)_mm512_##op(__T4, __T5); \ return __T6[0] __funline long long _mm512_reduce_min_epi64(__m512i __A) { __MM512_REDUCE_OP(min_epi64); } __funline long long _mm512_reduce_max_epi64(__m512i __A) { __MM512_REDUCE_OP(max_epi64); } __funline long long _mm512_mask_reduce_min_epi64(__mmask8 __U, __m512i __A) { __A = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __U, __A); __MM512_REDUCE_OP(min_epi64); } __funline long long _mm512_mask_reduce_max_epi64(__mmask8 __U, __m512i __A) { __A = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1), __U, __A); __MM512_REDUCE_OP(max_epi64); } __funline unsigned long long _mm512_reduce_min_epu64(__m512i __A) { __MM512_REDUCE_OP(min_epu64); } __funline unsigned long long _mm512_reduce_max_epu64(__m512i __A) { __MM512_REDUCE_OP(max_epu64); } __funline unsigned long long _mm512_mask_reduce_min_epu64(__mmask8 __U, __m512i __A) { __A = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0LL), __U, __A); __MM512_REDUCE_OP(min_epu64); } __funline unsigned long long _mm512_mask_reduce_max_epu64(__mmask8 __U, __m512i __A) { __A = _mm512_maskz_mov_epi64(__U, __A); __MM512_REDUCE_OP(max_epu64); } #undef __MM512_REDUCE_OP #define __MM512_REDUCE_OP(op) \ __m256d __T1 = (__m256d)_mm512_extractf64x4_pd(__A, 1); \ __m256d __T2 = (__m256d)_mm512_extractf64x4_pd(__A, 0); \ __m256d __T3 = __T1 op __T2; \ __m128d __T4 = _mm256_extractf128_pd(__T3, 1); \ __m128d __T5 = _mm256_extractf128_pd(__T3, 0); \ __m128d __T6 = __T4 op __T5; \ return __T6[0] op __T6[1] __funline double _mm512_reduce_add_pd(__m512d __A) { __MM512_REDUCE_OP(+); } __funline double _mm512_reduce_mul_pd(__m512d __A) { __MM512_REDUCE_OP(*); } __funline double _mm512_mask_reduce_add_pd(__mmask8 __U, __m512d __A) { __A = _mm512_maskz_mov_pd(__U, __A); __MM512_REDUCE_OP(+); } __funline double _mm512_mask_reduce_mul_pd(__mmask8 __U, __m512d __A) { __A = _mm512_mask_mov_pd(_mm512_set1_pd(1.0), __U, __A); __MM512_REDUCE_OP(*); } #undef __MM512_REDUCE_OP #define __MM512_REDUCE_OP(op) \ __m256d __T1 = (__m256d)_mm512_extractf64x4_pd(__A, 1); \ __m256d __T2 = (__m256d)_mm512_extractf64x4_pd(__A, 0); \ __m256d __T3 = _mm256_##op(__T1, __T2); \ __m128d __T4 = _mm256_extractf128_pd(__T3, 1); \ __m128d __T5 = _mm256_extractf128_pd(__T3, 0); \ __m128d __T6 = _mm_##op(__T4, __T5); \ __m128d __T7 = (__m128d)__builtin_shuffle(__T6, (__v2di){1, 0}); \ __m128d __T8 = _mm_##op(__T6, __T7); \ return __T8[0] __funline double _mm512_reduce_min_pd(__m512d __A) { __MM512_REDUCE_OP(min_pd); } __funline double _mm512_reduce_max_pd(__m512d __A) { __MM512_REDUCE_OP(max_pd); } __funline double _mm512_mask_reduce_min_pd(__mmask8 __U, __m512d __A) { __A = _mm512_mask_mov_pd(_mm512_set1_pd(__builtin_inf()), __U, __A); __MM512_REDUCE_OP(min_pd); } __funline double _mm512_mask_reduce_max_pd(__mmask8 __U, __m512d __A) { __A = _mm512_mask_mov_pd(_mm512_set1_pd(-__builtin_inf()), __U, __A); __MM512_REDUCE_OP(max_pd); } #undef __MM512_REDUCE_OP #ifdef __DISABLE_AVX512F__ #undef __DISABLE_AVX512F__ #pragma GCC pop_options #endif /* __DISABLE_AVX512F__ */ #endif /* _AVX512FINTRIN_H_INCLUDED */
442,762
10,327
jart/cosmopolitan
false
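The __MM512_REDUCE_OP helpers at the end of the AVX-512F header above collapse a 512-bit vector to a scalar by repeatedly halving it (512 to 256 to 128 bits, then in-register shuffles). A minimal usage sketch, assuming a stock <immintrin.h> (this tree's equivalent is immintrin.internal.h), compilation with -mavx512f, and illustrative sample values:

/* Sums all sixteen lanes, then only the low eight under a mask. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m512i v = _mm512_set1_epi32(3);
  int all = _mm512_reduce_add_epi32(v);              /* 16 * 3 == 48 */
  int low = _mm512_mask_reduce_add_epi32(0x00FF, v); /*  8 * 3 == 24 */
  printf("%d %d\n", all, low);
  return 0;
}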
cosmopolitan/third_party/intel/immintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #define _IMMINTRIN_H_INCLUDED #ifdef __x86_64__ /* clang-format off */ #include "third_party/intel/mmintrin.internal.h" #include "third_party/intel/xmmintrin.internal.h" #include "third_party/intel/emmintrin.internal.h" #include "third_party/intel/pmmintrin.internal.h" #include "third_party/intel/tmmintrin.internal.h" #include "third_party/intel/smmintrin.internal.h" #include "third_party/intel/wmmintrin.internal.h" #include "third_party/intel/fxsrintrin.internal.h" #include "third_party/intel/xsaveintrin.internal.h" #include "third_party/intel/xsaveoptintrin.internal.h" #include "third_party/intel/xsavesintrin.internal.h" #include "third_party/intel/xsavecintrin.internal.h" #include "third_party/intel/avxintrin.internal.h" #include "third_party/intel/avx2intrin.internal.h" #include "third_party/intel/avx512fintrin.internal.h" #include "third_party/intel/avx512erintrin.internal.h" #include "third_party/intel/avx512pfintrin.internal.h" #include "third_party/intel/avx512cdintrin.internal.h" #include "third_party/intel/avx512vlintrin.internal.h" #include "third_party/intel/avx512bwintrin.internal.h" #include "third_party/intel/avx512dqintrin.internal.h" #include "third_party/intel/avx512vlbwintrin.internal.h" #include "third_party/intel/avx512vldqintrin.internal.h" #include "third_party/intel/avx512ifmaintrin.internal.h" #include "third_party/intel/avx512ifmavlintrin.internal.h" #include "third_party/intel/avx512vbmiintrin.internal.h" #include "third_party/intel/avx512vbmivlintrin.internal.h" #include "third_party/intel/avx5124fmapsintrin.internal.h" #include "third_party/intel/avx5124vnniwintrin.internal.h" #include "third_party/intel/avx512vpopcntdqintrin.internal.h" #include "third_party/intel/avx512vbmi2intrin.internal.h" #include "third_party/intel/avx512vbmi2vlintrin.internal.h" #include "third_party/intel/avx512vnniintrin.internal.h" #include "third_party/intel/avx512vnnivlintrin.internal.h" #include "third_party/intel/avx512vpopcntdqvlintrin.internal.h" #include "third_party/intel/avx512bitalgintrin.internal.h" #include "third_party/intel/shaintrin.internal.h" #include "third_party/intel/lzcntintrin.internal.h" #include "third_party/intel/bmiintrin.internal.h" #include "third_party/intel/bmi2intrin.internal.h" #include "third_party/intel/fmaintrin.internal.h" #include "third_party/intel/f16cintrin.internal.h" #include "third_party/intel/rtmintrin.internal.h" #include "third_party/intel/xtestintrin.internal.h" #include "third_party/intel/cetintrin.internal.h" #include "third_party/intel/gfniintrin.internal.h" #include "third_party/intel/vaesintrin.internal.h" #include "third_party/intel/vpclmulqdqintrin.internal.h" #include "third_party/intel/movdirintrin.internal.h" #include "third_party/intel/sgxintrin.internal.h" #include "third_party/intel/pconfigintrin.internal.h" #include "third_party/intel/waitpkgintrin.internal.h" #include "third_party/intel/cldemoteintrin.internal.h" #include "third_party/intel/rdseedintrin.internal.h" #include "third_party/intel/prfchwintrin.internal.h" #include "third_party/intel/adxintrin.internal.h" #include "third_party/intel/clwbintrin.internal.h" #include "third_party/intel/clflushoptintrin.internal.h" #include "third_party/intel/wbnoinvdintrin.internal.h" #include "third_party/intel/pkuintrin.internal.h" /* clang-format on */ __funline void _wbinvd(void) { __builtin_ia32_wbinvd(); } #ifndef __RDRND__ #pragma GCC push_options #pragma GCC target("rdrnd") #define __DISABLE_RDRND__ #endif /* __RDRND__ */ __funline int 
_rdrand16_step(unsigned short *__P) { return __builtin_ia32_rdrand16_step(__P); } __funline int _rdrand32_step(unsigned int *__P) { return __builtin_ia32_rdrand32_step(__P); } #ifdef __DISABLE_RDRND__ #undef __DISABLE_RDRND__ #pragma GCC pop_options #endif /* __DISABLE_RDRND__ */ #ifndef __RDPID__ #pragma GCC push_options #pragma GCC target("rdpid") #define __DISABLE_RDPID__ #endif /* __RDPID__ */ __funline unsigned int _rdpid_u32(void) { return __builtin_ia32_rdpid(); } #ifdef __DISABLE_RDPID__ #undef __DISABLE_RDPID__ #pragma GCC pop_options #endif /* __DISABLE_RDPID__ */ #ifdef __x86_64__ #ifndef __FSGSBASE__ #pragma GCC push_options #pragma GCC target("fsgsbase") #define __DISABLE_FSGSBASE__ #endif /* __FSGSBASE__ */ __funline unsigned int _readfsbase_u32(void) { return __builtin_ia32_rdfsbase32(); } __funline unsigned long long _readfsbase_u64(void) { return __builtin_ia32_rdfsbase64(); } __funline unsigned int _readgsbase_u32(void) { return __builtin_ia32_rdgsbase32(); } __funline unsigned long long _readgsbase_u64(void) { return __builtin_ia32_rdgsbase64(); } __funline void _writefsbase_u32(unsigned int __B) { __builtin_ia32_wrfsbase32(__B); } __funline void _writefsbase_u64(unsigned long long __B) { __builtin_ia32_wrfsbase64(__B); } __funline void _writegsbase_u32(unsigned int __B) { __builtin_ia32_wrgsbase32(__B); } __funline void _writegsbase_u64(unsigned long long __B) { __builtin_ia32_wrgsbase64(__B); } #ifdef __DISABLE_FSGSBASE__ #undef __DISABLE_FSGSBASE__ #pragma GCC pop_options #endif /* __DISABLE_FSGSBASE__ */ #ifndef __RDRND__ #pragma GCC push_options #pragma GCC target("rdrnd") #define __DISABLE_RDRND__ #endif /* __RDRND__ */ __funline int _rdrand64_step(unsigned long long *__P) { return __builtin_ia32_rdrand64_step(__P); } #ifdef __DISABLE_RDRND__ #undef __DISABLE_RDRND__ #pragma GCC pop_options #endif /* __DISABLE_RDRND__ */ #endif /* __x86_64__ */ #ifndef __PTWRITE__ #pragma GCC push_options #pragma GCC target("ptwrite") #define __DISABLE_PTWRITE__ #endif #ifdef __x86_64__ __funline void _ptwrite64(unsigned long long __B) { __builtin_ia32_ptwrite64(__B); } #endif /* __x86_64__ */ __funline void _ptwrite32(unsigned __B) { __builtin_ia32_ptwrite32(__B); } #ifdef __DISABLE_PTWRITE__ #undef __DISABLE_PTWRITE__ #pragma GCC pop_options #endif /* __DISABLE_PTWRITE__ */ #endif /* __x86_64__ */ #endif /* _IMMINTRIN_H_INCLUDED */
5,981
182
jart/cosmopolitan
false
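The _rdrand16/32/64_step wrappers in the immintrin header above return 0 when the DRNG has no entropy ready, so callers conventionally retry a bounded number of times. A minimal sketch, assuming x86_64 and -mrdrnd:

#include <immintrin.h>

/* Returns 1 and fills *out on success, 0 if RDRAND kept failing. */
static int rdrand64_retry(unsigned long long *out, int tries) {
  while (tries-- > 0) {
    if (_rdrand64_step(out)) return 1; /* CF=1: *out holds 64 random bits */
  }
  return 0;
}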
cosmopolitan/third_party/intel/tmmintrin.internal.h
#ifndef _TMMINTRIN_H_INCLUDED
#define _TMMINTRIN_H_INCLUDED
#ifdef __x86_64__
#include "third_party/intel/pmmintrin.internal.h"
#ifndef __SSSE3__
#pragma GCC push_options
#pragma GCC target("ssse3")
#define __DISABLE_SSSE3__
#endif /* __SSSE3__ */
__funline __m128i _mm_hadd_epi16(__m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_phaddw128((__v8hi)__X, (__v8hi)__Y);
}
__funline __m128i _mm_hadd_epi32(__m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_phaddd128((__v4si)__X, (__v4si)__Y);
}
__funline __m128i _mm_hadds_epi16(__m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__X, (__v8hi)__Y);
}
__funline __m64 _mm_hadd_pi16(__m64 __X, __m64 __Y) {
  return (__m64)__builtin_ia32_phaddw((__v4hi)__X, (__v4hi)__Y);
}
__funline __m64 _mm_hadd_pi32(__m64 __X, __m64 __Y) {
  return (__m64)__builtin_ia32_phaddd((__v2si)__X, (__v2si)__Y);
}
__funline __m64 _mm_hadds_pi16(__m64 __X, __m64 __Y) {
  return (__m64)__builtin_ia32_phaddsw((__v4hi)__X, (__v4hi)__Y);
}
__funline __m128i _mm_hsub_epi16(__m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_phsubw128((__v8hi)__X, (__v8hi)__Y);
}
__funline __m128i _mm_hsub_epi32(__m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_phsubd128((__v4si)__X, (__v4si)__Y);
}
__funline __m128i _mm_hsubs_epi16(__m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__X, (__v8hi)__Y);
}
__funline __m64 _mm_hsub_pi16(__m64 __X, __m64 __Y) {
  return (__m64)__builtin_ia32_phsubw((__v4hi)__X, (__v4hi)__Y);
}
__funline __m64 _mm_hsub_pi32(__m64 __X, __m64 __Y) {
  return (__m64)__builtin_ia32_phsubd((__v2si)__X, (__v2si)__Y);
}
__funline __m64 _mm_hsubs_pi16(__m64 __X, __m64 __Y) {
  return (__m64)__builtin_ia32_phsubsw((__v4hi)__X, (__v4hi)__Y);
}
__funline __m128i _mm_maddubs_epi16(__m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_pmaddubsw128((__v16qi)__X, (__v16qi)__Y);
}
__funline __m64 _mm_maddubs_pi16(__m64 __X, __m64 __Y) {
  return (__m64)__builtin_ia32_pmaddubsw((__v8qi)__X, (__v8qi)__Y);
}
__funline __m128i _mm_mulhrs_epi16(__m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__X, (__v8hi)__Y);
}
__funline __m64 _mm_mulhrs_pi16(__m64 __X, __m64 __Y) {
  return (__m64)__builtin_ia32_pmulhrsw((__v4hi)__X, (__v4hi)__Y);
}
__funline __m128i _mm_shuffle_epi8(__m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_pshufb128((__v16qi)__X, (__v16qi)__Y);
}
__funline __m64 _mm_shuffle_pi8(__m64 __X, __m64 __Y) {
  return (__m64)__builtin_ia32_pshufb((__v8qi)__X, (__v8qi)__Y);
}
__funline __m128i _mm_sign_epi8(__m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_psignb128((__v16qi)__X, (__v16qi)__Y);
}
__funline __m128i _mm_sign_epi16(__m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_psignw128((__v8hi)__X, (__v8hi)__Y);
}
__funline __m128i _mm_sign_epi32(__m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_psignd128((__v4si)__X, (__v4si)__Y);
}
__funline __m64 _mm_sign_pi8(__m64 __X, __m64 __Y) {
  return (__m64)__builtin_ia32_psignb((__v8qi)__X, (__v8qi)__Y);
}
__funline __m64 _mm_sign_pi16(__m64 __X, __m64 __Y) {
  return (__m64)__builtin_ia32_psignw((__v4hi)__X, (__v4hi)__Y);
}
__funline __m64 _mm_sign_pi32(__m64 __X, __m64 __Y) {
  return (__m64)__builtin_ia32_psignd((__v2si)__X, (__v2si)__Y);
}
#ifdef __OPTIMIZE__
__funline __m128i _mm_alignr_epi8(__m128i __X, __m128i __Y, const int __N) {
  return (__m128i)__builtin_ia32_palignr128((__v2di)__X, (__v2di)__Y, __N * 8);
}
__funline __m64 _mm_alignr_pi8(__m64 __X, __m64 __Y, const int __N) {
  return (__m64)__builtin_ia32_palignr((__v1di)__X, (__v1di)__Y, __N * 8);
}
#else
#define _mm_alignr_epi8(X, Y, N)                                         \
  ((__m128i)__builtin_ia32_palignr128((__v2di)(__m128i)(X),              \
                                      (__v2di)(__m128i)(Y), (int)(N)*8))
#define _mm_alignr_pi8(X, Y, N)                                          \
  ((__m64)__builtin_ia32_palignr((__v1di)(__m64)(X), (__v1di)(__m64)(Y), \
                                 (int)(N)*8))
#endif
__funline __m128i _mm_abs_epi8(__m128i __X) {
  return (__m128i)__builtin_ia32_pabsb128((__v16qi)__X);
}
__funline __m128i _mm_abs_epi16(__m128i __X) {
  return (__m128i)__builtin_ia32_pabsw128((__v8hi)__X);
}
__funline __m128i _mm_abs_epi32(__m128i __X) {
  return (__m128i)__builtin_ia32_pabsd128((__v4si)__X);
}
__funline __m64 _mm_abs_pi8(__m64 __X) {
  return (__m64)__builtin_ia32_pabsb((__v8qi)__X);
}
__funline __m64 _mm_abs_pi16(__m64 __X) {
  return (__m64)__builtin_ia32_pabsw((__v4hi)__X);
}
__funline __m64 _mm_abs_pi32(__m64 __X) {
  return (__m64)__builtin_ia32_pabsd((__v2si)__X);
}
#ifdef __DISABLE_SSSE3__
#undef __DISABLE_SSSE3__
#pragma GCC pop_options
#endif /* __DISABLE_SSSE3__ */
#endif /* __x86_64__ */
#endif /* _TMMINTRIN_H_INCLUDED */
4,862
156
jart/cosmopolitan
false
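Of the SSSE3 operations in the tmmintrin header above, _mm_shuffle_epi8 is the workhorse: each control byte whose high bit is clear selects a source byte by index. A sketch that reverses the sixteen bytes of a vector, assuming -mssse3:

#include <immintrin.h>

static __m128i reverse_bytes(__m128i x) {
  /* _mm_set_epi8 lists bytes high-to-low, so lane 0 gets index 15. */
  const __m128i ctl = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                                   8, 9, 10, 11, 12, 13, 14, 15);
  return _mm_shuffle_epi8(x, ctl); /* result[i] = x[ctl[i]] */
}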
cosmopolitan/third_party/intel/pkuintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED
#error "Never use <pkuintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef _PKUINTRIN_H_INCLUDED
#define _PKUINTRIN_H_INCLUDED
#ifndef __PKU__
#pragma GCC push_options
#pragma GCC target("pku")
#define __DISABLE_PKU__
#endif /* __PKU__ */
__funline unsigned int _rdpkru_u32(void) {
  return __builtin_ia32_rdpkru();
}
__funline void _wrpkru(unsigned int __key) {
  __builtin_ia32_wrpkru(__key);
}
#ifdef __DISABLE_PKU__
#undef __DISABLE_PKU__
#pragma GCC pop_options
#endif /* __DISABLE_PKU__ */
#endif /* _PKUINTRIN_H_INCLUDED */
586
28
jart/cosmopolitan
false
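The PKRU register read and written by the two wrappers above holds two bits per protection key: bit 2k disables access and bit 2k+1 disables writes for key k. A sketch that makes pages tagged with key 1 read-only for the current thread; the key number is a hypothetical example and must already have been allocated (e.g. via pkey_alloc()). Assumes -mpku:

#include <immintrin.h>

static void make_key1_readonly(void) {
  unsigned int pkru = _rdpkru_u32();
  pkru &= ~(1u << 2); /* clear AD bit of key 1: reads allowed */
  pkru |= 1u << 3;    /* set WD bit of key 1: writes fault */
  _wrpkru(pkru);
}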
cosmopolitan/third_party/intel/cetintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED
#error "Never use <cetintrin.h> directly; include <x86intrin.h> instead."
#endif
#ifndef _CETINTRIN_H_INCLUDED
#define _CETINTRIN_H_INCLUDED
#ifndef __SHSTK__
#pragma GCC push_options
#pragma GCC target("shstk")
#define __DISABLE_SHSTK__
#endif /* __SHSTK__ */
#ifdef __x86_64__
__funline unsigned long long _get_ssp(void) {
  return __builtin_ia32_rdsspq();
}
#else
__funline unsigned int _get_ssp(void) {
  return __builtin_ia32_rdsspd();
}
#endif
__funline void _inc_ssp(unsigned int __B) {
#ifdef __x86_64__
  __builtin_ia32_incsspq((unsigned long long)__B);
#else
  __builtin_ia32_incsspd(__B);
#endif
}
__funline void _saveprevssp(void) {
  __builtin_ia32_saveprevssp();
}
__funline void _rstorssp(void *__B) {
  __builtin_ia32_rstorssp(__B);
}
__funline void _wrssd(unsigned int __B, void *__C) {
  __builtin_ia32_wrssd(__B, __C);
}
#ifdef __x86_64__
__funline void _wrssq(unsigned long long __B, void *__C) {
  __builtin_ia32_wrssq(__B, __C);
}
#endif
__funline void _wrussd(unsigned int __B, void *__C) {
  __builtin_ia32_wrussd(__B, __C);
}
#ifdef __x86_64__
__funline void _wrussq(unsigned long long __B, void *__C) {
  __builtin_ia32_wrussq(__B, __C);
}
#endif
__funline void _setssbsy(void) {
  __builtin_ia32_setssbsy();
}
__funline void _clrssbsy(void *__B) {
  __builtin_ia32_clrssbsy(__B);
}
#ifdef __DISABLE_SHSTK__
#undef __DISABLE_SHSTK__
#pragma GCC pop_options
#endif /* __DISABLE_SHSTK__ */
#endif /* _CETINTRIN_H_INCLUDED. */
1,500
74
jart/cosmopolitan
false
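On hardware where CET shadow stacks are not enabled, the rdssp instruction behind _get_ssp() above acts as a NOP and the wrapper returns 0, which makes it usable as a cheap runtime probe. A sketch under that assumption (-mshstk):

#include <immintrin.h>

/* Nonzero iff a shadow stack is active for this thread. */
static int shadow_stack_active(void) {
  return _get_ssp() != 0; /* rdssp yields 0 when CET is disabled */
}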
cosmopolitan/third_party/intel/fmaintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #error "Never use <fmaintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _FMAINTRIN_H_INCLUDED #define _FMAINTRIN_H_INCLUDED #ifndef __FMA__ #pragma GCC push_options #pragma GCC target("fma") #define __DISABLE_FMA__ #endif /* __FMA__ */ __funline __m128d _mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } __funline __m256d _mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } __funline __m128 _mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } __funline __m256 _mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } __funline __m128d _mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, (__v2df)__C); } __funline __m128 _mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } __funline __m128d _mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } __funline __m256d _mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } __funline __m128 _mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } __funline __m256 _mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } __funline __m128d _mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmsubsd3((__v2df)__A, (__v2df)__B, (__v2df)__C); } __funline __m128 _mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmsubss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } __funline __m128d _mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfnmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } __funline __m256d _mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfnmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } __funline __m128 _mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfnmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } __funline __m256 _mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfnmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } __funline __m128d _mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfnmaddsd3((__v2df)__A, (__v2df)__B, (__v2df)__C); } __funline __m128 _mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfnmaddss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } __funline __m128d _mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfnmsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } __funline __m256d _mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfnmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } __funline __m128 _mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfnmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } __funline __m256 
_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfnmsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } __funline __m128d _mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfnmsubsd3((__v2df)__A, (__v2df)__B, (__v2df)__C); } __funline __m128 _mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfnmsubss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } __funline __m128d _mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } __funline __m256d _mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } __funline __m128 _mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } __funline __m256 _mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } __funline __m128d _mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); } __funline __m256d _mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C); } __funline __m128 _mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); } __funline __m256 _mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); } #ifdef __DISABLE_FMA__ #undef __DISABLE_FMA__ #pragma GCC pop_options #endif /* __DISABLE_FMA__ */ #endif
6,819
178
jart/cosmopolitan
false
cosmopolitan/third_party/intel/vpclmulqdqintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #error "Never use <vpclmulqdqintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _VPCLMULQDQINTRIN_H_INCLUDED #define _VPCLMULQDQINTRIN_H_INCLUDED #if !defined(__VPCLMULQDQ__) || !defined(__AVX512F__) #pragma GCC push_options #pragma GCC target("vpclmulqdq,avx512f") #define __DISABLE_VPCLMULQDQF__ #endif /* __VPCLMULQDQF__ */ #ifdef __OPTIMIZE__ __funline __m512i _mm512_clmulepi64_epi128(__m512i __A, __m512i __B, const int __C) { return (__m512i)__builtin_ia32_vpclmulqdq_v8di((__v8di)__A, (__v8di)__B, __C); } #else #define _mm512_clmulepi64_epi128(A, B, C) \ ((__m512i)__builtin_ia32_vpclmulqdq_v8di((__v8di)(__m512i)(A), \ (__v8di)(__m512i)(B), (int)(C))) #endif #ifdef __DISABLE_VPCLMULQDQF__ #undef __DISABLE_VPCLMULQDQF__ #pragma GCC pop_options #endif /* __DISABLE_VPCLMULQDQF__ */ #if !defined(__VPCLMULQDQ__) || !defined(__AVX__) #pragma GCC push_options #pragma GCC target("vpclmulqdq,avx") #define __DISABLE_VPCLMULQDQ__ #endif /* __VPCLMULQDQ__ */ #ifdef __OPTIMIZE__ __funline __m256i _mm256_clmulepi64_epi128(__m256i __A, __m256i __B, const int __C) { return (__m256i)__builtin_ia32_vpclmulqdq_v4di((__v4di)__A, (__v4di)__B, __C); } #else #define _mm256_clmulepi64_epi128(A, B, C) \ ((__m256i)__builtin_ia32_vpclmulqdq_v4di((__v4di)(__m256i)(A), \ (__v4di)(__m256i)(B), (int)(C))) #endif #ifdef __DISABLE_VPCLMULQDQ__ #undef __DISABLE_VPCLMULQDQ__ #pragma GCC pop_options #endif /* __DISABLE_VPCLMULQDQ__ */ #endif /* _VPCLMULQDQINTRIN_H_INCLUDED */
1,743
53
jart/cosmopolitan
false
cosmopolitan/third_party/intel/mm_malloc.internal.h
#ifndef _MM_MALLOC_H_INCLUDED #define _MM_MALLOC_H_INCLUDED #ifdef __x86_64__ #include "libc/mem/mem.h" #ifndef __cplusplus extern int _mm_posix_memalign(void **, size_t, size_t) #else extern "C" int _mm_posix_memalign(void **, size_t, size_t) throw() #endif __asm__("posix_memalign"); static __inline void *_mm_malloc(size_t __size, size_t __alignment) { void *__ptr; if (__alignment == 1) return malloc(__size); if (__alignment == 2 || (sizeof(void *) == 8 && __alignment == 4)) __alignment = sizeof(void *); if (_mm_posix_memalign(&__ptr, __alignment, __size) == 0) return __ptr; else return NULL; } static __inline void _mm_free(void *__ptr) { free(__ptr); } #endif /* __x86_64__ */ #endif /* _MM_MALLOC_H_INCLUDED */
754
30
jart/cosmopolitan
false
cosmopolitan/third_party/intel/vaesintrin.internal.h
#ifndef __VAESINTRIN_H_INCLUDED #define __VAESINTRIN_H_INCLUDED #ifdef __x86_64__ #include "third_party/intel/x86intrin.internal.h" #if !defined(__VAES__) || !defined(__AVX__) #pragma GCC push_options #pragma GCC target("vaes,avx") #define __DISABLE_VAES__ #endif /* __VAES__ */ __funline __m256i _mm256_aesdec_epi128(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_vaesdec_v32qi((__v32qi)__A, (__v32qi)__B); } __funline __m256i _mm256_aesdeclast_epi128(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_vaesdeclast_v32qi((__v32qi)__A, (__v32qi)__B); } __funline __m256i _mm256_aesenc_epi128(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_vaesenc_v32qi((__v32qi)__A, (__v32qi)__B); } __funline __m256i _mm256_aesenclast_epi128(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_vaesenclast_v32qi((__v32qi)__A, (__v32qi)__B); } #ifdef __DISABLE_VAES__ #undef __DISABLE_VAES__ #pragma GCC pop_options #endif /* __DISABLE_VAES__ */ #if !defined(__VAES__) || !defined(__AVX512F__) #pragma GCC push_options #pragma GCC target("vaes,avx512f") #define __DISABLE_VAESF__ #endif /* __VAES__ */ __funline __m512i _mm512_aesdec_epi128(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_vaesdec_v64qi((__v64qi)__A, (__v64qi)__B); } __funline __m512i _mm512_aesdeclast_epi128(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_vaesdeclast_v64qi((__v64qi)__A, (__v64qi)__B); } __funline __m512i _mm512_aesenc_epi128(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_vaesenc_v64qi((__v64qi)__A, (__v64qi)__B); } __funline __m512i _mm512_aesenclast_epi128(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_vaesenclast_v64qi((__v64qi)__A, (__v64qi)__B); } #ifdef __DISABLE_VAESF__ #undef __DISABLE_VAESF__ #pragma GCC pop_options #endif /* __DISABLE_VAES__ */ #endif /* __x86_64__ */ #endif /* __VAESINTRIN_H_INCLUDED */
1,907
62
jart/cosmopolitan
false
cosmopolitan/third_party/intel/prfchwintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED && !defined _MM3DNOW_H_INCLUDED #error \ "Never use <prfchwintrin.h> directly; include <immintrin.h> or <mm3dnow.h> instead." #endif #ifndef _PRFCHWINTRIN_H_INCLUDED #define _PRFCHWINTRIN_H_INCLUDED __funline void _m_prefetchw(void *__P) { __builtin_prefetch(__P, 1, 3 /* _MM_HINT_T0 */); } #endif /* _PRFCHWINTRIN_H_INCLUDED */
373
14
jart/cosmopolitan
false
cosmopolitan/third_party/intel/ia32intrin.internal.h
#ifndef _X86INTRIN_H_INCLUDED #error "Never use <ia32intrin.h> directly; include <x86intrin.h> instead." #endif __funline int __bsfd(int __X) { return __builtin_ctz(__X); } __funline int __bsrd(int __X) { return __builtin_ia32_bsrsi(__X); } __funline int __bswapd(int __X) { return __builtin_bswap32(__X); } #ifndef __iamcu__ #ifndef __SSE4_2__ #pragma GCC push_options #pragma GCC target("sse4.2") #define __DISABLE_SSE4_2__ #endif /* __SSE4_2__ */ __funline unsigned int __crc32b(unsigned int __C, unsigned char __V) { return __builtin_ia32_crc32qi(__C, __V); } __funline unsigned int __crc32w(unsigned int __C, unsigned short __V) { return __builtin_ia32_crc32hi(__C, __V); } __funline unsigned int __crc32d(unsigned int __C, unsigned int __V) { return __builtin_ia32_crc32si(__C, __V); } #ifdef __DISABLE_SSE4_2__ #undef __DISABLE_SSE4_2__ #pragma GCC pop_options #endif /* __DISABLE_SSE4_2__ */ #endif /* __iamcu__ */ __funline int __popcntd(unsigned int __X) { return __builtin_popcount(__X); } #ifndef __iamcu__ __funline unsigned long long __rdpmc(int __S) { return __builtin_ia32_rdpmc(__S); } #endif /* __iamcu__ */ __funline unsigned long long __rdtsc(void) { return __builtin_ia32_rdtsc(); } #ifndef __iamcu__ __funline unsigned long long __rdtscp(unsigned int *__A) { return __builtin_ia32_rdtscp(__A); } #endif /* __iamcu__ */ __funline unsigned char __rolb(unsigned char __X, int __C) { return __builtin_ia32_rolqi(__X, __C); } __funline unsigned short __rolw(unsigned short __X, int __C) { return __builtin_ia32_rolhi(__X, __C); } __funline unsigned int __rold(unsigned int __X, int __C) { __C &= 31; return (__X << __C) | (__X >> (-__C & 31)); } __funline unsigned char __rorb(unsigned char __X, int __C) { return __builtin_ia32_rorqi(__X, __C); } __funline unsigned short __rorw(unsigned short __X, int __C) { return __builtin_ia32_rorhi(__X, __C); } __funline unsigned int __rord(unsigned int __X, int __C) { __C &= 31; return (__X >> __C) | (__X << (-__C & 31)); } __funline void __pause(void) { __builtin_ia32_pause(); } #ifdef __x86_64__ __funline int __bsfq(long long __X) { return __builtin_ctzll(__X); } __funline int __bsrq(long long __X) { return __builtin_ia32_bsrdi(__X); } __funline long long __bswapq(long long __X) { return __builtin_bswap64(__X); } #ifndef __SSE4_2__ #pragma GCC push_options #pragma GCC target("sse4.2") #define __DISABLE_SSE4_2__ #endif /* __SSE4_2__ */ __funline unsigned long long __crc32q(unsigned long long __C, unsigned long long __V) { return __builtin_ia32_crc32di(__C, __V); } #ifdef __DISABLE_SSE4_2__ #undef __DISABLE_SSE4_2__ #pragma GCC pop_options #endif /* __DISABLE_SSE4_2__ */ __funline long long __popcntq(unsigned long long __X) { return __builtin_popcountll(__X); } __funline unsigned long long __rolq(unsigned long long __X, int __C) { __C &= 63; return (__X << __C) | (__X >> (-__C & 63)); } __funline unsigned long long __rorq(unsigned long long __X, int __C) { __C &= 63; return (__X >> __C) | (__X << (-__C & 63)); } __funline unsigned long long __readeflags(void) { return __builtin_ia32_readeflags_u64(); } __funline void __writeeflags(unsigned long long __X) { __builtin_ia32_writeeflags_u64(__X); } #define _bswap64(a) __bswapq(a) #define _popcnt64(a) __popcntq(a) #else __funline unsigned int __readeflags(void) { return __builtin_ia32_readeflags_u32(); } __funline void __writeeflags(unsigned int __X) { __builtin_ia32_writeeflags_u32(__X); } #endif #ifdef __LP64__ #define _lrotl(a, b) __rolq((a), (b)) #define _lrotr(a, b) __rorq((a), (b)) #else #define _lrotl(a, b) 
__rold((a), (b)) #define _lrotr(a, b) __rord((a), (b)) #endif #define _bit_scan_forward(a) __bsfd(a) #define _bit_scan_reverse(a) __bsrd(a) #define _bswap(a) __bswapd(a) #define _popcnt32(a) __popcntd(a) #ifndef __iamcu__ #define _rdpmc(a) __rdpmc(a) #define _rdtscp(a) __rdtscp(a) #endif /* __iamcu__ */ #define _rdtsc() __rdtsc() #define _rotwl(a, b) __rolw((a), (b)) #define _rotwr(a, b) __rorw((a), (b)) #define _rotl(a, b) __rold((a), (b)) #define _rotr(a, b) __rord((a), (b))
4,203
185
jart/cosmopolitan
false
cosmopolitan/third_party/intel/avx512bwintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #error "Never use <avx512bwintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _AVX512BWINTRIN_H_INCLUDED #define _AVX512BWINTRIN_H_INCLUDED #ifndef __AVX512BW__ #pragma GCC push_options #pragma GCC target("avx512bw") #define __DISABLE_AVX512BW__ #endif /* __AVX512BW__ */ typedef short __v32hi __attribute__((__vector_size__(64))); typedef char __v64qi __attribute__((__vector_size__(64))); typedef unsigned long long __mmask64; __funline unsigned char _ktest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__CF) { *__CF = (unsigned char)__builtin_ia32_ktestcsi(__A, __B); return (unsigned char)__builtin_ia32_ktestzsi(__A, __B); } __funline unsigned char _ktest_mask64_u8(__mmask64 __A, __mmask64 __B, unsigned char *__CF) { *__CF = (unsigned char)__builtin_ia32_ktestcdi(__A, __B); return (unsigned char)__builtin_ia32_ktestzdi(__A, __B); } __funline unsigned char _ktestz_mask32_u8(__mmask32 __A, __mmask32 __B) { return (unsigned char)__builtin_ia32_ktestzsi(__A, __B); } __funline unsigned char _ktestz_mask64_u8(__mmask64 __A, __mmask64 __B) { return (unsigned char)__builtin_ia32_ktestzdi(__A, __B); } __funline unsigned char _ktestc_mask32_u8(__mmask32 __A, __mmask32 __B) { return (unsigned char)__builtin_ia32_ktestcsi(__A, __B); } __funline unsigned char _ktestc_mask64_u8(__mmask64 __A, __mmask64 __B) { return (unsigned char)__builtin_ia32_ktestcdi(__A, __B); } __funline unsigned char _kortest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__CF) { *__CF = (unsigned char)__builtin_ia32_kortestcsi(__A, __B); return (unsigned char)__builtin_ia32_kortestzsi(__A, __B); } __funline unsigned char _kortest_mask64_u8(__mmask64 __A, __mmask64 __B, unsigned char *__CF) { *__CF = (unsigned char)__builtin_ia32_kortestcdi(__A, __B); return (unsigned char)__builtin_ia32_kortestzdi(__A, __B); } __funline unsigned char _kortestz_mask32_u8(__mmask32 __A, __mmask32 __B) { return (unsigned char)__builtin_ia32_kortestzsi(__A, __B); } __funline unsigned char _kortestz_mask64_u8(__mmask64 __A, __mmask64 __B) { return (unsigned char)__builtin_ia32_kortestzdi(__A, __B); } __funline unsigned char _kortestc_mask32_u8(__mmask32 __A, __mmask32 __B) { return (unsigned char)__builtin_ia32_kortestcsi(__A, __B); } __funline unsigned char _kortestc_mask64_u8(__mmask64 __A, __mmask64 __B) { return (unsigned char)__builtin_ia32_kortestcdi(__A, __B); } __funline __mmask32 _kadd_mask32(__mmask32 __A, __mmask32 __B) { return (__mmask32)__builtin_ia32_kaddsi((__mmask32)__A, (__mmask32)__B); } __funline __mmask64 _kadd_mask64(__mmask64 __A, __mmask64 __B) { return (__mmask64)__builtin_ia32_kadddi((__mmask64)__A, (__mmask64)__B); } __funline unsigned int _cvtmask32_u32(__mmask32 __A) { return (unsigned int)__builtin_ia32_kmovd((__mmask32)__A); } __funline unsigned long long _cvtmask64_u64(__mmask64 __A) { return (unsigned long long)__builtin_ia32_kmovq((__mmask64)__A); } __funline __mmask32 _cvtu32_mask32(unsigned int __A) { return (__mmask32)__builtin_ia32_kmovd((__mmask32)__A); } __funline __mmask64 _cvtu64_mask64(unsigned long long __A) { return (__mmask64)__builtin_ia32_kmovq((__mmask64)__A); } __funline __mmask32 _load_mask32(__mmask32 *__A) { return (__mmask32)__builtin_ia32_kmovd(*__A); } __funline __mmask64 _load_mask64(__mmask64 *__A) { return (__mmask64)__builtin_ia32_kmovq(*(__mmask64 *)__A); } __funline void _store_mask32(__mmask32 *__A, __mmask32 __B) { *(__mmask32 *)__A = __builtin_ia32_kmovd(__B); } __funline void _store_mask64(__mmask64 *__A, __mmask64 __B) { 
*(__mmask64 *)__A = __builtin_ia32_kmovq(__B); } __funline __mmask32 _knot_mask32(__mmask32 __A) { return (__mmask32)__builtin_ia32_knotsi((__mmask32)__A); } __funline __mmask64 _knot_mask64(__mmask64 __A) { return (__mmask64)__builtin_ia32_knotdi((__mmask64)__A); } __funline __mmask32 _kor_mask32(__mmask32 __A, __mmask32 __B) { return (__mmask32)__builtin_ia32_korsi((__mmask32)__A, (__mmask32)__B); } __funline __mmask64 _kor_mask64(__mmask64 __A, __mmask64 __B) { return (__mmask64)__builtin_ia32_kordi((__mmask64)__A, (__mmask64)__B); } __funline __mmask32 _kxnor_mask32(__mmask32 __A, __mmask32 __B) { return (__mmask32)__builtin_ia32_kxnorsi((__mmask32)__A, (__mmask32)__B); } __funline __mmask64 _kxnor_mask64(__mmask64 __A, __mmask64 __B) { return (__mmask64)__builtin_ia32_kxnordi((__mmask64)__A, (__mmask64)__B); } __funline __mmask32 _kxor_mask32(__mmask32 __A, __mmask32 __B) { return (__mmask32)__builtin_ia32_kxorsi((__mmask32)__A, (__mmask32)__B); } __funline __mmask64 _kxor_mask64(__mmask64 __A, __mmask64 __B) { return (__mmask64)__builtin_ia32_kxordi((__mmask64)__A, (__mmask64)__B); } __funline __mmask32 _kand_mask32(__mmask32 __A, __mmask32 __B) { return (__mmask32)__builtin_ia32_kandsi((__mmask32)__A, (__mmask32)__B); } __funline __mmask64 _kand_mask64(__mmask64 __A, __mmask64 __B) { return (__mmask64)__builtin_ia32_kanddi((__mmask64)__A, (__mmask64)__B); } __funline __mmask32 _kandn_mask32(__mmask32 __A, __mmask32 __B) { return (__mmask32)__builtin_ia32_kandnsi((__mmask32)__A, (__mmask32)__B); } __funline __mmask64 _kandn_mask64(__mmask64 __A, __mmask64 __B) { return (__mmask64)__builtin_ia32_kandndi((__mmask64)__A, (__mmask64)__B); } __funline __m512i _mm512_mask_mov_epi16(__m512i __W, __mmask32 __U, __m512i __A) { return (__m512i)__builtin_ia32_movdquhi512_mask((__v32hi)__A, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_mov_epi16(__mmask32 __U, __m512i __A) { return (__m512i)__builtin_ia32_movdquhi512_mask( (__v32hi)__A, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_mask_loadu_epi16(__m512i __W, __mmask32 __U, void const *__P) { return (__m512i)__builtin_ia32_loaddquhi512_mask( (const short *)__P, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_loadu_epi16(__mmask32 __U, void const *__P) { return (__m512i)__builtin_ia32_loaddquhi512_mask( (const short *)__P, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline void _mm512_mask_storeu_epi16(void *__P, __mmask32 __U, __m512i __A) { __builtin_ia32_storedquhi512_mask((short *)__P, (__v32hi)__A, (__mmask32)__U); } __funline __m512i _mm512_mask_mov_epi8(__m512i __W, __mmask64 __U, __m512i __A) { return (__m512i)__builtin_ia32_movdquqi512_mask((__v64qi)__A, (__v64qi)__W, (__mmask64)__U); } __funline __m512i _mm512_maskz_mov_epi8(__mmask64 __U, __m512i __A) { return (__m512i)__builtin_ia32_movdquqi512_mask( (__v64qi)__A, (__v64qi)_mm512_setzero_si512(), (__mmask64)__U); } __funline __mmask32 _mm512_kunpackw(__mmask32 __A, __mmask32 __B) { return (__mmask32)__builtin_ia32_kunpcksi((__mmask32)__A, (__mmask32)__B); } __funline __mmask32 _kunpackw_mask32(__mmask16 __A, __mmask16 __B) { return (__mmask32)__builtin_ia32_kunpcksi((__mmask32)__A, (__mmask32)__B); } __funline __mmask64 _mm512_kunpackd(__mmask64 __A, __mmask64 __B) { return (__mmask64)__builtin_ia32_kunpckdi((__mmask64)__A, (__mmask64)__B); } __funline __mmask64 _kunpackd_mask64(__mmask32 __A, __mmask32 __B) { return (__mmask64)__builtin_ia32_kunpckdi((__mmask64)__A, (__mmask64)__B); } __funline __m512i 
_mm512_mask_loadu_epi8(__m512i __W, __mmask64 __U, void const *__P) { return (__m512i)__builtin_ia32_loaddquqi512_mask( (const char *)__P, (__v64qi)__W, (__mmask64)__U); } __funline __m512i _mm512_maskz_loadu_epi8(__mmask64 __U, void const *__P) { return (__m512i)__builtin_ia32_loaddquqi512_mask( (const char *)__P, (__v64qi)_mm512_setzero_si512(), (__mmask64)__U); } __funline void _mm512_mask_storeu_epi8(void *__P, __mmask64 __U, __m512i __A) { __builtin_ia32_storedquqi512_mask((char *)__P, (__v64qi)__A, (__mmask64)__U); } __funline __m512i _mm512_sad_epu8(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psadbw512((__v64qi)__A, (__v64qi)__B); } __funline __m256i _mm512_cvtepi16_epi8(__m512i __A) { return (__m256i)__builtin_ia32_pmovwb512_mask( (__v32hi)__A, (__v32qi)_mm256_undefined_si256(), (__mmask32)-1); } __funline void _mm512_mask_cvtepi16_storeu_epi8(void *__P, __mmask32 __M, __m512i __A) { __builtin_ia32_pmovwb512mem_mask((__v32qi *)__P, (__v32hi)__A, __M); } __funline __m256i _mm512_mask_cvtepi16_epi8(__m256i __O, __mmask32 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovwb512_mask((__v32hi)__A, (__v32qi)__O, __M); } __funline __m256i _mm512_maskz_cvtepi16_epi8(__mmask32 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovwb512_mask( (__v32hi)__A, (__v32qi)_mm256_setzero_si256(), __M); } __funline __m256i _mm512_cvtsepi16_epi8(__m512i __A) { return (__m256i)__builtin_ia32_pmovswb512_mask( (__v32hi)__A, (__v32qi)_mm256_undefined_si256(), (__mmask32)-1); } __funline void _mm512_mask_cvtsepi16_storeu_epi8(void *__P, __mmask32 __M, __m512i __A) { __builtin_ia32_pmovswb512mem_mask((__v32qi *)__P, (__v32hi)__A, __M); } __funline __m256i _mm512_mask_cvtsepi16_epi8(__m256i __O, __mmask32 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovswb512_mask((__v32hi)__A, (__v32qi)__O, __M); } __funline __m256i _mm512_maskz_cvtsepi16_epi8(__mmask32 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovswb512_mask( (__v32hi)__A, (__v32qi)_mm256_setzero_si256(), __M); } __funline __m256i _mm512_cvtusepi16_epi8(__m512i __A) { return (__m256i)__builtin_ia32_pmovuswb512_mask( (__v32hi)__A, (__v32qi)_mm256_undefined_si256(), (__mmask32)-1); } __funline __m256i _mm512_mask_cvtusepi16_epi8(__m256i __O, __mmask32 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovuswb512_mask((__v32hi)__A, (__v32qi)__O, __M); } __funline void _mm512_mask_cvtusepi16_storeu_epi8(void *__P, __mmask32 __M, __m512i __A) { __builtin_ia32_pmovuswb512mem_mask((__v32qi *)__P, (__v32hi)__A, __M); } __funline __m256i _mm512_maskz_cvtusepi16_epi8(__mmask32 __M, __m512i __A) { return (__m256i)__builtin_ia32_pmovuswb512_mask( (__v32hi)__A, (__v32qi)_mm256_setzero_si256(), __M); } __funline __m512i _mm512_broadcastb_epi8(__m128i __A) { return (__m512i)__builtin_ia32_pbroadcastb512_mask( (__v16qi)__A, (__v64qi)_mm512_undefined_epi32(), (__mmask64)-1); } __funline __m512i _mm512_mask_broadcastb_epi8(__m512i __O, __mmask64 __M, __m128i __A) { return (__m512i)__builtin_ia32_pbroadcastb512_mask((__v16qi)__A, (__v64qi)__O, __M); } __funline __m512i _mm512_maskz_broadcastb_epi8(__mmask64 __M, __m128i __A) { return (__m512i)__builtin_ia32_pbroadcastb512_mask( (__v16qi)__A, (__v64qi)_mm512_setzero_si512(), __M); } __funline __m512i _mm512_mask_set1_epi8(__m512i __O, __mmask64 __M, char __A) { return (__m512i)__builtin_ia32_pbroadcastb512_gpr_mask(__A, (__v64qi)__O, __M); } __funline __m512i _mm512_maskz_set1_epi8(__mmask64 __M, char __A) { return (__m512i)__builtin_ia32_pbroadcastb512_gpr_mask( __A, 
(__v64qi)_mm512_setzero_si512(), __M); } __funline __m512i _mm512_broadcastw_epi16(__m128i __A) { return (__m512i)__builtin_ia32_pbroadcastw512_mask( (__v8hi)__A, (__v32hi)_mm512_undefined_epi32(), (__mmask32)-1); } __funline __m512i _mm512_mask_broadcastw_epi16(__m512i __O, __mmask32 __M, __m128i __A) { return (__m512i)__builtin_ia32_pbroadcastw512_mask((__v8hi)__A, (__v32hi)__O, __M); } __funline __m512i _mm512_maskz_broadcastw_epi16(__mmask32 __M, __m128i __A) { return (__m512i)__builtin_ia32_pbroadcastw512_mask( (__v8hi)__A, (__v32hi)_mm512_setzero_si512(), __M); } __funline __m512i _mm512_mask_set1_epi16(__m512i __O, __mmask32 __M, short __A) { return (__m512i)__builtin_ia32_pbroadcastw512_gpr_mask(__A, (__v32hi)__O, __M); } __funline __m512i _mm512_maskz_set1_epi16(__mmask32 __M, short __A) { return (__m512i)__builtin_ia32_pbroadcastw512_gpr_mask( __A, (__v32hi)_mm512_setzero_si512(), __M); } __funline __m512i _mm512_mulhrs_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmulhrsw512_mask( (__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmulhrsw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmulhrsw512_mask( (__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_mulhi_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmulhw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_mulhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmulhw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_mulhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmulhw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_mulhi_epu16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmulhuw512_mask( (__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_mulhi_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmulhuw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_mulhi_epu16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmulhuw512_mask( (__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_mullo_epi16(__m512i __A, __m512i __B) { return (__m512i)((__v32hu)__A * (__v32hu)__B); } __funline __m512i _mm512_mask_mullo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmullw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_mullo_epi16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmullw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_cvtepi8_epi16(__m256i __A) { return (__m512i)__builtin_ia32_pmovsxbw512_mask( (__v32qi)__A, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_cvtepi8_epi16(__m512i __W, __mmask32 __U, __m256i __A) { return 
(__m512i)__builtin_ia32_pmovsxbw512_mask((__v32qi)__A, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_cvtepi8_epi16(__mmask32 __U, __m256i __A) { return (__m512i)__builtin_ia32_pmovsxbw512_mask( (__v32qi)__A, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_cvtepu8_epi16(__m256i __A) { return (__m512i)__builtin_ia32_pmovzxbw512_mask( (__v32qi)__A, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_cvtepu8_epi16(__m512i __W, __mmask32 __U, __m256i __A) { return (__m512i)__builtin_ia32_pmovzxbw512_mask((__v32qi)__A, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_cvtepu8_epi16(__mmask32 __U, __m256i __A) { return (__m512i)__builtin_ia32_pmovzxbw512_mask( (__v32qi)__A, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_permutexvar_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_permvarhi512_mask( (__v32hi)__B, (__v32hi)__A, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_maskz_permutexvar_epi16(__mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_permvarhi512_mask( (__v32hi)__B, (__v32hi)__A, (__v32hi)_mm512_setzero_si512(), (__mmask32)__M); } __funline __m512i _mm512_mask_permutexvar_epi16(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_permvarhi512_mask( (__v32hi)__B, (__v32hi)__A, (__v32hi)__W, (__mmask32)__M); } __funline __m512i _mm512_permutex2var_epi16(__m512i __A, __m512i __I, __m512i __B) { return (__m512i)__builtin_ia32_vpermt2varhi512_mask( (__v32hi)__I /* idx */, (__v32hi)__A, (__v32hi)__B, (__mmask32)-1); } __funline __m512i _mm512_mask_permutex2var_epi16(__m512i __A, __mmask32 __U, __m512i __I, __m512i __B) { return (__m512i)__builtin_ia32_vpermt2varhi512_mask( (__v32hi)__I /* idx */, (__v32hi)__A, (__v32hi)__B, (__mmask32)__U); } __funline __m512i _mm512_mask2_permutex2var_epi16(__m512i __A, __m512i __I, __mmask32 __U, __m512i __B) { return (__m512i)__builtin_ia32_vpermi2varhi512_mask((__v32hi)__A, (__v32hi)__I /* idx */, (__v32hi)__B, (__mmask32)__U); } __funline __m512i _mm512_maskz_permutex2var_epi16(__mmask32 __U, __m512i __A, __m512i __I, __m512i __B) { return (__m512i)__builtin_ia32_vpermt2varhi512_maskz( (__v32hi)__I /* idx */, (__v32hi)__A, (__v32hi)__B, (__mmask32)__U); } __funline __m512i _mm512_avg_epu8(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pavgb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)-1); } __funline __m512i _mm512_mask_avg_epu8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pavgb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)__W, (__mmask64)__U); } __funline __m512i _mm512_maskz_avg_epu8(__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pavgb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)__U); } __funline __m512i _mm512_add_epi8(__m512i __A, __m512i __B) { return (__m512i)((__v64qu)__A + (__v64qu)__B); } __funline __m512i _mm512_mask_add_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)__W, (__mmask64)__U); } __funline __m512i _mm512_maskz_add_epi8(__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)__U); } __funline __m512i _mm512_sub_epi8(__m512i __A, __m512i __B) { return 
(__m512i)((__v64qu)__A - (__v64qu)__B); } __funline __m512i _mm512_mask_sub_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)__W, (__mmask64)__U); } __funline __m512i _mm512_maskz_sub_epi8(__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)__U); } __funline __m512i _mm512_avg_epu16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pavgw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_avg_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pavgw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_avg_epu16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pavgw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_subs_epi8(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubsb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)-1); } __funline __m512i _mm512_mask_subs_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubsb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)__W, (__mmask64)__U); } __funline __m512i _mm512_maskz_subs_epi8(__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubsb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)__U); } __funline __m512i _mm512_subs_epu8(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubusb512_mask( (__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)-1); } __funline __m512i _mm512_mask_subs_epu8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubusb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)__W, (__mmask64)__U); } __funline __m512i _mm512_maskz_subs_epu8(__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubusb512_mask( (__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)__U); } __funline __m512i _mm512_adds_epi8(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddsb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)-1); } __funline __m512i _mm512_mask_adds_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddsb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)__W, (__mmask64)__U); } __funline __m512i _mm512_maskz_adds_epi8(__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddsb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)__U); } __funline __m512i _mm512_adds_epu8(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddusb512_mask( (__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)-1); } __funline __m512i _mm512_mask_adds_epu8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddusb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)__W, (__mmask64)__U); } __funline __m512i _mm512_maskz_adds_epu8(__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddusb512_mask( (__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)__U); } __funline __m512i _mm512_sub_epi16(__m512i __A, __m512i __B) { return 
(__m512i)((__v32hu)__A - (__v32hu)__B); } __funline __m512i _mm512_mask_sub_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_sub_epi16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_subs_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubsw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_subs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubsw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_subs_epi16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubsw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_subs_epu16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubusw512_mask( (__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_subs_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubusw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_subs_epu16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psubusw512_mask( (__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_add_epi16(__m512i __A, __m512i __B) { return (__m512i)((__v32hu)__A + (__v32hu)__B); } __funline __m512i _mm512_mask_add_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_add_epi16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_adds_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddsw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_adds_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddsw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_adds_epi16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddsw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_adds_epu16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddusw512_mask( (__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_adds_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddusw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_adds_epu16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_paddusw512_mask( (__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_srl_epi16(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psrlw512_mask((__v32hi)__A, (__v8hi)__B, 
(__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_srl_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psrlw512_mask((__v32hi)__A, (__v8hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psrlw512_mask((__v32hi)__A, (__v8hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_packs_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packsswb512_mask( (__v32hi)__A, (__v32hi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)-1); } __funline __m512i _mm512_sll_epi16(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psllw512_mask((__v32hi)__A, (__v8hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_sll_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psllw512_mask((__v32hi)__A, (__v8hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psllw512_mask((__v32hi)__A, (__v8hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_maddubs_epi16(__m512i __X, __m512i __Y) { return (__m512i)__builtin_ia32_pmaddubsw512_mask( (__v64qi)__X, (__v64qi)__Y, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_maddubs_epi16(__m512i __W, __mmask32 __U, __m512i __X, __m512i __Y) { return (__m512i)__builtin_ia32_pmaddubsw512_mask( (__v64qi)__X, (__v64qi)__Y, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_maddubs_epi16(__mmask32 __U, __m512i __X, __m512i __Y) { return (__m512i)__builtin_ia32_pmaddubsw512_mask( (__v64qi)__X, (__v64qi)__Y, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_madd_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmaddwd512_mask( (__v32hi)__A, (__v32hi)__B, (__v16si)_mm512_setzero_si512(), (__mmask16)-1); } __funline __m512i _mm512_mask_madd_epi16(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmaddwd512_mask((__v32hi)__A, (__v32hi)__B, (__v16si)__W, (__mmask16)__U); } __funline __m512i _mm512_maskz_madd_epi16(__mmask16 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmaddwd512_mask( (__v32hi)__A, (__v32hi)__B, (__v16si)_mm512_setzero_si512(), (__mmask16)__U); } __funline __m512i _mm512_unpackhi_epi8(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_punpckhbw512_mask( (__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)-1); } __funline __m512i _mm512_mask_unpackhi_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_punpckhbw512_mask( (__v64qi)__A, (__v64qi)__B, (__v64qi)__W, (__mmask64)__U); } __funline __m512i _mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_punpckhbw512_mask( (__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)__U); } __funline __m512i _mm512_unpackhi_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_punpckhwd512_mask( (__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_unpackhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_punpckhwd512_mask( (__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i 
_mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_punpckhwd512_mask( (__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_unpacklo_epi8(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_punpcklbw512_mask( (__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)-1); } __funline __m512i _mm512_mask_unpacklo_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_punpcklbw512_mask( (__v64qi)__A, (__v64qi)__B, (__v64qi)__W, (__mmask64)__U); } __funline __m512i _mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_punpcklbw512_mask( (__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)__U); } __funline __m512i _mm512_unpacklo_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_punpcklwd512_mask( (__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_unpacklo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_punpcklwd512_mask( (__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_punpcklwd512_mask( (__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __mmask64 _mm512_cmpeq_epu8_mask(__m512i __A, __m512i __B) { return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__A, (__v64qi)__B, 0, (__mmask64)-1); } __funline __mmask64 _mm512_cmpeq_epi8_mask(__m512i __A, __m512i __B) { return (__mmask64)__builtin_ia32_pcmpeqb512_mask((__v64qi)__A, (__v64qi)__B, (__mmask64)-1); } __funline __mmask64 _mm512_mask_cmpeq_epu8_mask(__mmask64 __U, __m512i __A, __m512i __B) { return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__A, (__v64qi)__B, 0, __U); } __funline __mmask64 _mm512_mask_cmpeq_epi8_mask(__mmask64 __U, __m512i __A, __m512i __B) { return (__mmask64)__builtin_ia32_pcmpeqb512_mask((__v64qi)__A, (__v64qi)__B, __U); } __funline __mmask32 _mm512_cmpeq_epu16_mask(__m512i __A, __m512i __B) { return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__A, (__v32hi)__B, 0, (__mmask32)-1); } __funline __mmask32 _mm512_cmpeq_epi16_mask(__m512i __A, __m512i __B) { return (__mmask32)__builtin_ia32_pcmpeqw512_mask((__v32hi)__A, (__v32hi)__B, (__mmask32)-1); } __funline __mmask32 _mm512_mask_cmpeq_epu16_mask(__mmask32 __U, __m512i __A, __m512i __B) { return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__A, (__v32hi)__B, 0, __U); } __funline __mmask32 _mm512_mask_cmpeq_epi16_mask(__mmask32 __U, __m512i __A, __m512i __B) { return (__mmask32)__builtin_ia32_pcmpeqw512_mask((__v32hi)__A, (__v32hi)__B, __U); } __funline __mmask64 _mm512_cmpgt_epu8_mask(__m512i __A, __m512i __B) { return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__A, (__v64qi)__B, 6, (__mmask64)-1); } __funline __mmask64 _mm512_cmpgt_epi8_mask(__m512i __A, __m512i __B) { return (__mmask64)__builtin_ia32_pcmpgtb512_mask((__v64qi)__A, (__v64qi)__B, (__mmask64)-1); } __funline __mmask64 _mm512_mask_cmpgt_epu8_mask(__mmask64 __U, __m512i __A, __m512i __B) { return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__A, (__v64qi)__B, 6, __U); } __funline __mmask64 _mm512_mask_cmpgt_epi8_mask(__mmask64 __U, __m512i __A, __m512i __B) { return (__mmask64)__builtin_ia32_pcmpgtb512_mask((__v64qi)__A, (__v64qi)__B, __U); } __funline __mmask32 
_mm512_cmpgt_epu16_mask(__m512i __A, __m512i __B) { return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__A, (__v32hi)__B, 6, (__mmask32)-1); } __funline __mmask32 _mm512_cmpgt_epi16_mask(__m512i __A, __m512i __B) { return (__mmask32)__builtin_ia32_pcmpgtw512_mask((__v32hi)__A, (__v32hi)__B, (__mmask32)-1); } __funline __mmask32 _mm512_mask_cmpgt_epu16_mask(__mmask32 __U, __m512i __A, __m512i __B) { return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__A, (__v32hi)__B, 6, __U); } __funline __mmask32 _mm512_mask_cmpgt_epi16_mask(__mmask32 __U, __m512i __A, __m512i __B) { return (__mmask32)__builtin_ia32_pcmpgtw512_mask((__v32hi)__A, (__v32hi)__B, __U); } __funline __mmask64 _mm512_movepi8_mask(__m512i __A) { return (__mmask64)__builtin_ia32_cvtb2mask512((__v64qi)__A); } __funline __mmask32 _mm512_movepi16_mask(__m512i __A) { return (__mmask32)__builtin_ia32_cvtw2mask512((__v32hi)__A); } __funline __m512i _mm512_movm_epi8(__mmask64 __A) { return (__m512i)__builtin_ia32_cvtmask2b512(__A); } __funline __m512i _mm512_movm_epi16(__mmask32 __A) { return (__m512i)__builtin_ia32_cvtmask2w512(__A); } __funline __mmask64 _mm512_test_epi8_mask(__m512i __A, __m512i __B) { return (__mmask64)__builtin_ia32_ptestmb512((__v64qi)__A, (__v64qi)__B, (__mmask64)-1); } __funline __mmask64 _mm512_mask_test_epi8_mask(__mmask64 __U, __m512i __A, __m512i __B) { return (__mmask64)__builtin_ia32_ptestmb512((__v64qi)__A, (__v64qi)__B, __U); } __funline __mmask32 _mm512_test_epi16_mask(__m512i __A, __m512i __B) { return (__mmask32)__builtin_ia32_ptestmw512((__v32hi)__A, (__v32hi)__B, (__mmask32)-1); } __funline __mmask32 _mm512_mask_test_epi16_mask(__mmask32 __U, __m512i __A, __m512i __B) { return (__mmask32)__builtin_ia32_ptestmw512((__v32hi)__A, (__v32hi)__B, __U); } __funline __mmask64 _mm512_testn_epi8_mask(__m512i __A, __m512i __B) { return (__mmask64)__builtin_ia32_ptestnmb512((__v64qi)__A, (__v64qi)__B, (__mmask64)-1); } __funline __mmask64 _mm512_mask_testn_epi8_mask(__mmask64 __U, __m512i __A, __m512i __B) { return (__mmask64)__builtin_ia32_ptestnmb512((__v64qi)__A, (__v64qi)__B, __U); } __funline __mmask32 _mm512_testn_epi16_mask(__m512i __A, __m512i __B) { return (__mmask32)__builtin_ia32_ptestnmw512((__v32hi)__A, (__v32hi)__B, (__mmask32)-1); } __funline __mmask32 _mm512_mask_testn_epi16_mask(__mmask32 __U, __m512i __A, __m512i __B) { return (__mmask32)__builtin_ia32_ptestnmw512((__v32hi)__A, (__v32hi)__B, __U); } __funline __m512i _mm512_shuffle_epi8(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pshufb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)-1); } __funline __m512i _mm512_mask_shuffle_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pshufb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)__W, (__mmask64)__U); } __funline __m512i _mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pshufb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)__U); } __funline __m512i _mm512_min_epu16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pminuw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_maskz_min_epu16(__mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pminuw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__M); } __funline __m512i _mm512_mask_min_epu16(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) { 
return (__m512i)__builtin_ia32_pminuw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__M); } __funline __m512i _mm512_min_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pminsw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_maskz_min_epi16(__mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pminsw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__M); } __funline __m512i _mm512_mask_min_epi16(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pminsw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__M); } __funline __m512i _mm512_max_epu8(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmaxub512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)-1); } __funline __m512i _mm512_maskz_max_epu8(__mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmaxub512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)__M); } __funline __m512i _mm512_mask_max_epu8(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmaxub512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)__W, (__mmask64)__M); } __funline __m512i _mm512_max_epi8(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmaxsb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)-1); } __funline __m512i _mm512_maskz_max_epi8(__mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmaxsb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)__M); } __funline __m512i _mm512_mask_max_epi8(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmaxsb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)__W, (__mmask64)__M); } __funline __m512i _mm512_min_epu8(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pminub512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)-1); } __funline __m512i _mm512_maskz_min_epu8(__mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pminub512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)__M); } __funline __m512i _mm512_mask_min_epu8(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pminub512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)__W, (__mmask64)__M); } __funline __m512i _mm512_min_epi8(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pminsb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)-1); } __funline __m512i _mm512_maskz_min_epi8(__mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pminsb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)__M); } __funline __m512i _mm512_mask_min_epi8(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pminsb512_mask((__v64qi)__A, (__v64qi)__B, (__v64qi)__W, (__mmask64)__M); } __funline __m512i _mm512_max_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmaxsw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_maskz_max_epi16(__mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmaxsw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__M); } __funline __m512i _mm512_mask_max_epi16(__m512i __W, 
__mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmaxsw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__M); } __funline __m512i _mm512_max_epu16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmaxuw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_maskz_max_epu16(__mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmaxuw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__M); } __funline __m512i _mm512_mask_max_epu16(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_pmaxuw512_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__M); } __funline __m512i _mm512_sra_epi16(__m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psraw512_mask((__v32hi)__A, (__v8hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_sra_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psraw512_mask((__v32hi)__A, (__v8hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B) { return (__m512i)__builtin_ia32_psraw512_mask((__v32hi)__A, (__v8hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_srav_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psrav32hi_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_srav_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psrav32hi_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_srav_epi16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psrav32hi_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_srlv_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psrlv32hi_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_srlv_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psrlv32hi_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_srlv_epi16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psrlv32hi_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_sllv_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psllv32hi_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_sllv_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psllv32hi_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_sllv_epi16(__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_psllv32hi_mask((__v32hi)__A, (__v32hi)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_mask_packs_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packsswb512_mask((__v32hi)__A, (__v32hi)__B, (__v64qi)__W, (__mmask64)__M); } __funline __m512i _mm512_maskz_packs_epi16(__mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packsswb512_mask( (__v32hi)__A, (__v32hi)__B, 
(__v64qi)_mm512_setzero_si512(), __M); } __funline __m512i _mm512_packus_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packuswb512_mask( (__v32hi)__A, (__v32hi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)-1); } __funline __m512i _mm512_mask_packus_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packuswb512_mask((__v32hi)__A, (__v32hi)__B, (__v64qi)__W, (__mmask64)__M); } __funline __m512i _mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packuswb512_mask( (__v32hi)__A, (__v32hi)__B, (__v64qi)_mm512_setzero_si512(), (__mmask64)__M); } __funline __m512i _mm512_abs_epi8(__m512i __A) { return (__m512i)__builtin_ia32_pabsb512_mask( (__v64qi)__A, (__v64qi)_mm512_setzero_si512(), (__mmask64)-1); } __funline __m512i _mm512_mask_abs_epi8(__m512i __W, __mmask64 __U, __m512i __A) { return (__m512i)__builtin_ia32_pabsb512_mask((__v64qi)__A, (__v64qi)__W, (__mmask64)__U); } __funline __m512i _mm512_maskz_abs_epi8(__mmask64 __U, __m512i __A) { return (__m512i)__builtin_ia32_pabsb512_mask( (__v64qi)__A, (__v64qi)_mm512_setzero_si512(), (__mmask64)__U); } __funline __m512i _mm512_abs_epi16(__m512i __A) { return (__m512i)__builtin_ia32_pabsw512_mask( (__v32hi)__A, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_abs_epi16(__m512i __W, __mmask32 __U, __m512i __A) { return (__m512i)__builtin_ia32_pabsw512_mask((__v32hi)__A, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_abs_epi16(__mmask32 __U, __m512i __A) { return (__m512i)__builtin_ia32_pabsw512_mask( (__v32hi)__A, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __mmask64 _mm512_mask_cmpneq_epu8_mask(__mmask64 __M, __m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__X, (__v64qi)__Y, 4, (__mmask64)__M); } __funline __mmask64 _mm512_mask_cmplt_epu8_mask(__mmask64 __M, __m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__X, (__v64qi)__Y, 1, (__mmask64)__M); } __funline __mmask64 _mm512_mask_cmpge_epu8_mask(__mmask64 __M, __m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__X, (__v64qi)__Y, 5, (__mmask64)__M); } __funline __mmask64 _mm512_mask_cmple_epu8_mask(__mmask64 __M, __m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__X, (__v64qi)__Y, 2, (__mmask64)__M); } __funline __mmask32 _mm512_mask_cmpneq_epu16_mask(__mmask32 __M, __m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__X, (__v32hi)__Y, 4, (__mmask32)__M); } __funline __mmask32 _mm512_mask_cmplt_epu16_mask(__mmask32 __M, __m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__X, (__v32hi)__Y, 1, (__mmask32)__M); } __funline __mmask32 _mm512_mask_cmpge_epu16_mask(__mmask32 __M, __m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__X, (__v32hi)__Y, 5, (__mmask32)__M); } __funline __mmask32 _mm512_mask_cmple_epu16_mask(__mmask32 __M, __m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__X, (__v32hi)__Y, 2, (__mmask32)__M); } __funline __mmask64 _mm512_mask_cmpneq_epi8_mask(__mmask64 __M, __m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__X, (__v64qi)__Y, 4, (__mmask64)__M); } __funline __mmask64 _mm512_mask_cmplt_epi8_mask(__mmask64 __M, __m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__X, 
(__v64qi)__Y, 1, (__mmask64)__M); } __funline __mmask64 _mm512_mask_cmpge_epi8_mask(__mmask64 __M, __m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__X, (__v64qi)__Y, 5, (__mmask64)__M); } __funline __mmask64 _mm512_mask_cmple_epi8_mask(__mmask64 __M, __m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__X, (__v64qi)__Y, 2, (__mmask64)__M); } __funline __mmask32 _mm512_mask_cmpneq_epi16_mask(__mmask32 __M, __m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__X, (__v32hi)__Y, 4, (__mmask32)__M); } __funline __mmask32 _mm512_mask_cmplt_epi16_mask(__mmask32 __M, __m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__X, (__v32hi)__Y, 1, (__mmask32)__M); } __funline __mmask32 _mm512_mask_cmpge_epi16_mask(__mmask32 __M, __m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__X, (__v32hi)__Y, 5, (__mmask32)__M); } __funline __mmask32 _mm512_mask_cmple_epi16_mask(__mmask32 __M, __m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__X, (__v32hi)__Y, 2, (__mmask32)__M); } __funline __mmask64 _mm512_cmpneq_epu8_mask(__m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__X, (__v64qi)__Y, 4, (__mmask64)-1); } __funline __mmask64 _mm512_cmplt_epu8_mask(__m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__X, (__v64qi)__Y, 1, (__mmask64)-1); } __funline __mmask64 _mm512_cmpge_epu8_mask(__m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__X, (__v64qi)__Y, 5, (__mmask64)-1); } __funline __mmask64 _mm512_cmple_epu8_mask(__m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__X, (__v64qi)__Y, 2, (__mmask64)-1); } __funline __mmask32 _mm512_cmpneq_epu16_mask(__m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__X, (__v32hi)__Y, 4, (__mmask32)-1); } __funline __mmask32 _mm512_cmplt_epu16_mask(__m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__X, (__v32hi)__Y, 1, (__mmask32)-1); } __funline __mmask32 _mm512_cmpge_epu16_mask(__m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__X, (__v32hi)__Y, 5, (__mmask32)-1); } __funline __mmask32 _mm512_cmple_epu16_mask(__m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__X, (__v32hi)__Y, 2, (__mmask32)-1); } __funline __mmask64 _mm512_cmpneq_epi8_mask(__m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__X, (__v64qi)__Y, 4, (__mmask64)-1); } __funline __mmask64 _mm512_cmplt_epi8_mask(__m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__X, (__v64qi)__Y, 1, (__mmask64)-1); } __funline __mmask64 _mm512_cmpge_epi8_mask(__m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__X, (__v64qi)__Y, 5, (__mmask64)-1); } __funline __mmask64 _mm512_cmple_epi8_mask(__m512i __X, __m512i __Y) { return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__X, (__v64qi)__Y, 2, (__mmask64)-1); } __funline __mmask32 _mm512_cmpneq_epi16_mask(__m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__X, (__v32hi)__Y, 4, (__mmask32)-1); } __funline __mmask32 _mm512_cmplt_epi16_mask(__m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__X, (__v32hi)__Y, 1, (__mmask32)-1); } __funline __mmask32 
_mm512_cmpge_epi16_mask(__m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__X, (__v32hi)__Y, 5, (__mmask32)-1); } __funline __mmask32 _mm512_cmple_epi16_mask(__m512i __X, __m512i __Y) { return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__X, (__v32hi)__Y, 2, (__mmask32)-1); } __funline __m512i _mm512_packs_epi32(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packssdw512_mask( (__v16si)__A, (__v16si)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_maskz_packs_epi32(__mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packssdw512_mask( (__v16si)__A, (__v16si)__B, (__v32hi)_mm512_setzero_si512(), __M); } __funline __m512i _mm512_mask_packs_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packssdw512_mask((__v16si)__A, (__v16si)__B, (__v32hi)__W, __M); } __funline __m512i _mm512_packus_epi32(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packusdw512_mask( (__v16si)__A, (__v16si)__B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_maskz_packus_epi32(__mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packusdw512_mask( (__v16si)__A, (__v16si)__B, (__v32hi)_mm512_setzero_si512(), __M); } __funline __m512i _mm512_mask_packus_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packusdw512_mask((__v16si)__A, (__v16si)__B, (__v32hi)__W, __M); } #ifdef __OPTIMIZE__ __funline __mmask32 _kshiftli_mask32(__mmask32 __A, unsigned int __B) { return (__mmask32)__builtin_ia32_kshiftlisi((__mmask32)__A, (__mmask8)__B); } __funline __mmask64 _kshiftli_mask64(__mmask64 __A, unsigned int __B) { return (__mmask64)__builtin_ia32_kshiftlidi((__mmask64)__A, (__mmask8)__B); } __funline __mmask32 _kshiftri_mask32(__mmask32 __A, unsigned int __B) { return (__mmask32)__builtin_ia32_kshiftrisi((__mmask32)__A, (__mmask8)__B); } __funline __mmask64 _kshiftri_mask64(__mmask64 __A, unsigned int __B) { return (__mmask64)__builtin_ia32_kshiftridi((__mmask64)__A, (__mmask8)__B); } __funline __m512i _mm512_alignr_epi8(__m512i __A, __m512i __B, const int __N) { return (__m512i)__builtin_ia32_palignr512((__v8di)__A, (__v8di)__B, __N * 8); } __funline __m512i _mm512_mask_alignr_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B, const int __N) { return (__m512i)__builtin_ia32_palignr512_mask( (__v8di)__A, (__v8di)__B, __N * 8, (__v8di)__W, (__mmask64)__U); } __funline __m512i _mm512_maskz_alignr_epi8(__mmask64 __U, __m512i __A, __m512i __B, const int __N) { return (__m512i)__builtin_ia32_palignr512_mask( (__v8di)__A, (__v8di)__B, __N * 8, (__v8di)_mm512_setzero_si512(), (__mmask64)__U); } __funline __m512i _mm512_dbsad_epu8(__m512i __A, __m512i __B, const int __imm) { return (__m512i)__builtin_ia32_dbpsadbw512_mask( (__v64qi)__A, (__v64qi)__B, __imm, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_dbsad_epu8(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B, const int __imm) { return (__m512i)__builtin_ia32_dbpsadbw512_mask( (__v64qi)__A, (__v64qi)__B, __imm, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_dbsad_epu8(__mmask32 __U, __m512i __A, __m512i __B, const int __imm) { return (__m512i)__builtin_ia32_dbpsadbw512_mask( (__v64qi)__A, (__v64qi)__B, __imm, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_srli_epi16(__m512i __A, const int __imm) { return (__m512i)__builtin_ia32_psrlwi512_mask( 
(__v32hi)__A, __imm, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A, const int __imm) { return (__m512i)__builtin_ia32_psrlwi512_mask((__v32hi)__A, __imm, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, const int __imm) { return (__m512i)__builtin_ia32_psrlwi512_mask( (__v32hi)__A, __imm, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_slli_epi16(__m512i __A, const int __B) { return (__m512i)__builtin_ia32_psllwi512_mask( (__v32hi)__A, __B, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A, const int __B) { return (__m512i)__builtin_ia32_psllwi512_mask((__v32hi)__A, __B, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, const int __B) { return (__m512i)__builtin_ia32_psllwi512_mask( (__v32hi)__A, __B, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_shufflehi_epi16(__m512i __A, const int __imm) { return (__m512i)__builtin_ia32_pshufhw512_mask( (__v32hi)__A, __imm, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_shufflehi_epi16(__m512i __W, __mmask32 __U, __m512i __A, const int __imm) { return (__m512i)__builtin_ia32_pshufhw512_mask((__v32hi)__A, __imm, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_shufflehi_epi16(__mmask32 __U, __m512i __A, const int __imm) { return (__m512i)__builtin_ia32_pshufhw512_mask( (__v32hi)__A, __imm, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_shufflelo_epi16(__m512i __A, const int __imm) { return (__m512i)__builtin_ia32_pshuflw512_mask( (__v32hi)__A, __imm, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_shufflelo_epi16(__m512i __W, __mmask32 __U, __m512i __A, const int __imm) { return (__m512i)__builtin_ia32_pshuflw512_mask((__v32hi)__A, __imm, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_shufflelo_epi16(__mmask32 __U, __m512i __A, const int __imm) { return (__m512i)__builtin_ia32_pshuflw512_mask( (__v32hi)__A, __imm, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_srai_epi16(__m512i __A, const int __imm) { return (__m512i)__builtin_ia32_psrawi512_mask( (__v32hi)__A, __imm, (__v32hi)_mm512_setzero_si512(), (__mmask32)-1); } __funline __m512i _mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A, const int __imm) { return (__m512i)__builtin_ia32_psrawi512_mask((__v32hi)__A, __imm, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, const int __imm) { return (__m512i)__builtin_ia32_psrawi512_mask( (__v32hi)__A, __imm, (__v32hi)_mm512_setzero_si512(), (__mmask32)__U); } __funline __m512i _mm512_mask_blend_epi16(__mmask32 __U, __m512i __A, __m512i __W) { return (__m512i)__builtin_ia32_blendmw_512_mask((__v32hi)__A, (__v32hi)__W, (__mmask32)__U); } __funline __m512i _mm512_mask_blend_epi8(__mmask64 __U, __m512i __A, __m512i __W) { return (__m512i)__builtin_ia32_blendmb_512_mask((__v64qi)__A, (__v64qi)__W, (__mmask64)__U); } __funline __mmask32 _mm512_mask_cmp_epi16_mask(__mmask32 __U, __m512i __X, __m512i __Y, const int __P) { return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__X, (__v32hi)__Y, __P, (__mmask32)__U); } __funline __mmask32 _mm512_cmp_epi16_mask(__m512i __X, __m512i __Y, const int __P) { return 
(__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__X, (__v32hi)__Y, __P, (__mmask32)-1); } __funline __mmask64 _mm512_mask_cmp_epi8_mask(__mmask64 __U, __m512i __X, __m512i __Y, const int __P) { return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__X, (__v64qi)__Y, __P, (__mmask64)__U); } __funline __mmask64 _mm512_cmp_epi8_mask(__m512i __X, __m512i __Y, const int __P) { return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__X, (__v64qi)__Y, __P, (__mmask64)-1); } __funline __mmask32 _mm512_mask_cmp_epu16_mask(__mmask32 __U, __m512i __X, __m512i __Y, const int __P) { return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__X, (__v32hi)__Y, __P, (__mmask32)__U); } __funline __mmask32 _mm512_cmp_epu16_mask(__m512i __X, __m512i __Y, const int __P) { return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__X, (__v32hi)__Y, __P, (__mmask32)-1); } __funline __mmask64 _mm512_mask_cmp_epu8_mask(__mmask64 __U, __m512i __X, __m512i __Y, const int __P) { return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__X, (__v64qi)__Y, __P, (__mmask64)__U); } __funline __mmask64 _mm512_cmp_epu8_mask(__m512i __X, __m512i __Y, const int __P) { return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__X, (__v64qi)__Y, __P, (__mmask64)-1); } __funline __m512i _mm512_bslli_epi128(__m512i __A, const int __N) { return (__m512i)__builtin_ia32_pslldq512(__A, __N * 8); } __funline __m512i _mm512_bsrli_epi128(__m512i __A, const int __N) { return (__m512i)__builtin_ia32_psrldq512(__A, __N * 8); } #else #define _kshiftli_mask32(X, Y) \ ((__mmask32)__builtin_ia32_kshiftlisi((__mmask32)(X), (__mmask8)(Y))) #define _kshiftli_mask64(X, Y) \ ((__mmask64)__builtin_ia32_kshiftlidi((__mmask64)(X), (__mmask8)(Y))) #define _kshiftri_mask32(X, Y) \ ((__mmask32)__builtin_ia32_kshiftrisi((__mmask32)(X), (__mmask8)(Y))) #define _kshiftri_mask64(X, Y) \ ((__mmask64)__builtin_ia32_kshiftridi((__mmask64)(X), (__mmask8)(Y))) #define _mm512_alignr_epi8(X, Y, N) \ ((__m512i)__builtin_ia32_palignr512((__v8di)(__m512i)(X), \ (__v8di)(__m512i)(Y), (int)(N * 8))) #define _mm512_mask_alignr_epi8(W, U, X, Y, N) \ ((__m512i)__builtin_ia32_palignr512_mask( \ (__v8di)(__m512i)(X), (__v8di)(__m512i)(Y), (int)(N * 8), \ (__v8di)(__m512i)(W), (__mmask64)(U))) #define _mm512_maskz_alignr_epi8(U, X, Y, N) \ ((__m512i)__builtin_ia32_palignr512_mask( \ (__v8di)(__m512i)(X), (__v8di)(__m512i)(Y), (int)(N * 8), \ (__v8di)(__m512i)_mm512_setzero_si512(), (__mmask64)(U))) #define _mm512_dbsad_epu8(X, Y, C) \ ((__m512i)__builtin_ia32_dbpsadbw512_mask( \ (__v64qi)(__m512i)(X), (__v64qi)(__m512i)(Y), (int)(C), \ (__v32hi)(__m512i)_mm512_setzero_si512(), (__mmask32)-1)) #define _mm512_mask_dbsad_epu8(W, U, X, Y, C) \ ((__m512i)__builtin_ia32_dbpsadbw512_mask( \ (__v64qi)(__m512i)(X), (__v64qi)(__m512i)(Y), (int)(C), \ (__v32hi)(__m512i)(W), (__mmask32)(U))) #define _mm512_maskz_dbsad_epu8(U, X, Y, C) \ ((__m512i)__builtin_ia32_dbpsadbw512_mask( \ (__v64qi)(__m512i)(X), (__v64qi)(__m512i)(Y), (int)(C), \ (__v32hi)(__m512i)_mm512_setzero_si512(), (__mmask32)(U))) #define _mm512_srli_epi16(A, B) \ ((__m512i)__builtin_ia32_psrlwi512_mask((__v32hi)(__m512i)(A), (int)(B), \ (__v32hi)_mm512_setzero_si512(), \ (__mmask32)-1)) #define _mm512_mask_srli_epi16(W, U, A, B) \ ((__m512i)__builtin_ia32_psrlwi512_mask( \ (__v32hi)(__m512i)(A), (int)(B), (__v32hi)(__m512i)(W), (__mmask32)(U))) #define _mm512_maskz_srli_epi16(U, A, B) \ ((__m512i)__builtin_ia32_psrlwi512_mask((__v32hi)(__m512i)(A), (int)(B), \ (__v32hi)_mm512_setzero_si512(), \ (__mmask32)(U))) #define 
_mm512_slli_epi16(X, C) \ ((__m512i)__builtin_ia32_psllwi512_mask( \ (__v32hi)(__m512i)(X), (int)(C), \ (__v32hi)(__m512i)_mm512_setzero_si512(), (__mmask32)-1)) #define _mm512_mask_slli_epi16(W, U, X, C) \ ((__m512i)__builtin_ia32_psllwi512_mask( \ (__v32hi)(__m512i)(X), (int)(C), (__v32hi)(__m512i)(W), (__mmask32)(U))) #define _mm512_maskz_slli_epi16(U, X, C) \ ((__m512i)__builtin_ia32_psllwi512_mask( \ (__v32hi)(__m512i)(X), (int)(C), \ (__v32hi)(__m512i)_mm512_setzero_si512(), (__mmask32)(U))) #define _mm512_shufflehi_epi16(A, B) \ ((__m512i)__builtin_ia32_pshufhw512_mask( \ (__v32hi)(__m512i)(A), (int)(B), \ (__v32hi)(__m512i)_mm512_setzero_si512(), (__mmask32)-1)) #define _mm512_mask_shufflehi_epi16(W, U, A, B) \ ((__m512i)__builtin_ia32_pshufhw512_mask( \ (__v32hi)(__m512i)(A), (int)(B), (__v32hi)(__m512i)(W), (__mmask32)(U))) #define _mm512_maskz_shufflehi_epi16(U, A, B) \ ((__m512i)__builtin_ia32_pshufhw512_mask( \ (__v32hi)(__m512i)(A), (int)(B), \ (__v32hi)(__m512i)_mm512_setzero_si512(), (__mmask32)(U))) #define _mm512_shufflelo_epi16(A, B) \ ((__m512i)__builtin_ia32_pshuflw512_mask( \ (__v32hi)(__m512i)(A), (int)(B), \ (__v32hi)(__m512i)_mm512_setzero_si512(), (__mmask32)-1)) #define _mm512_mask_shufflelo_epi16(W, U, A, B) \ ((__m512i)__builtin_ia32_pshuflw512_mask( \ (__v32hi)(__m512i)(A), (int)(B), (__v32hi)(__m512i)(W), (__mmask32)(U))) #define _mm512_maskz_shufflelo_epi16(U, A, B) \ ((__m512i)__builtin_ia32_pshuflw512_mask( \ (__v32hi)(__m512i)(A), (int)(B), \ (__v32hi)(__m512i)_mm512_setzero_si512(), (__mmask32)(U))) #define _mm512_srai_epi16(A, B) \ ((__m512i)__builtin_ia32_psrawi512_mask((__v32hi)(__m512i)(A), (int)(B), \ (__v32hi)_mm512_setzero_si512(), \ (__mmask32)-1)) #define _mm512_mask_srai_epi16(W, U, A, B) \ ((__m512i)__builtin_ia32_psrawi512_mask( \ (__v32hi)(__m512i)(A), (int)(B), (__v32hi)(__m512i)(W), (__mmask32)(U))) #define _mm512_maskz_srai_epi16(U, A, B) \ ((__m512i)__builtin_ia32_psrawi512_mask((__v32hi)(__m512i)(A), (int)(B), \ (__v32hi)_mm512_setzero_si512(), \ (__mmask32)(U))) #define _mm512_mask_blend_epi16(__U, __A, __W) \ ((__m512i)__builtin_ia32_blendmw_512_mask((__v32hi)(__A), (__v32hi)(__W), \ (__mmask32)(__U))) #define _mm512_mask_blend_epi8(__U, __A, __W) \ ((__m512i)__builtin_ia32_blendmb_512_mask((__v64qi)(__A), (__v64qi)(__W), \ (__mmask64)(__U))) #define _mm512_cmp_epi16_mask(X, Y, P) \ ((__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(X), \ (__v32hi)(__m512i)(Y), (int)(P), \ (__mmask32)(-1))) #define _mm512_cmp_epi8_mask(X, Y, P) \ ((__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(X), \ (__v64qi)(__m512i)(Y), (int)(P), \ (__mmask64)(-1))) #define _mm512_cmp_epu16_mask(X, Y, P) \ ((__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(X), \ (__v32hi)(__m512i)(Y), (int)(P), \ (__mmask32)(-1))) #define _mm512_cmp_epu8_mask(X, Y, P) \ ((__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(X), \ (__v64qi)(__m512i)(Y), (int)(P), \ (__mmask64)(-1))) #define _mm512_mask_cmp_epi16_mask(M, X, Y, P) \ ((__mmask32)__builtin_ia32_cmpw512_mask( \ (__v32hi)(__m512i)(X), (__v32hi)(__m512i)(Y), (int)(P), (__mmask32)(M))) #define _mm512_mask_cmp_epi8_mask(M, X, Y, P) \ ((__mmask64)__builtin_ia32_cmpb512_mask( \ (__v64qi)(__m512i)(X), (__v64qi)(__m512i)(Y), (int)(P), (__mmask64)(M))) #define _mm512_mask_cmp_epu16_mask(M, X, Y, P) \ ((__mmask32)__builtin_ia32_ucmpw512_mask( \ (__v32hi)(__m512i)(X), (__v32hi)(__m512i)(Y), (int)(P), (__mmask32)(M))) #define _mm512_mask_cmp_epu8_mask(M, X, Y, P) \ 
((__mmask64)__builtin_ia32_ucmpb512_mask( \ (__v64qi)(__m512i)(X), (__v64qi)(__m512i)(Y), (int)(P), (__mmask64)(M))) #define _mm512_bslli_epi128(A, N) \ ((__m512i)__builtin_ia32_pslldq512((__m512i)(A), (int)(N)*8)) #define _mm512_bsrli_epi128(A, N) \ ((__m512i)__builtin_ia32_psrldq512((__m512i)(A), (int)(N)*8)) #endif #ifdef __DISABLE_AVX512BW__ #undef __DISABLE_AVX512BW__ #pragma GCC pop_options #endif /* __DISABLE_AVX512BW__ */ #endif /* _AVX512BWINTRIN_H_INCLUDED */
84,729
1,955
jart/cosmopolitan
false
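The byte/word compare intrinsics in the header above all reduce to a plain __mmask64/__mmask32 bitmask, which composes directly with scalar bit tricks. A minimal usage sketch, assuming the AVX-512F load/broadcast helpers from immintrin.h; count_below is a hypothetical helper (compile with -mavx512bw):

#include <immintrin.h>

int count_below(const unsigned char p[64], unsigned char limit) {
  __m512i v = _mm512_loadu_si512(p);
  __m512i t = _mm512_set1_epi8((char)limit);
  __mmask64 m = _mm512_cmplt_epu8_mask(v, t);  /* bit i set iff p[i] < limit */
  return __builtin_popcountll((unsigned long long)m);
}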
cosmopolitan/third_party/intel/nmmintrin.internal.h
#ifndef _NMMINTRIN_H_INCLUDED
#define _NMMINTRIN_H_INCLUDED
#ifdef __x86_64__
#include "third_party/intel/smmintrin.internal.h"
#endif /* __x86_64__ */
#endif /* _NMMINTRIN_H_INCLUDED */
187
7
jart/cosmopolitan
false
cosmopolitan/third_party/intel/avx512vbmiintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED
#error "Never use <avx512vbmiintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef _AVX512VBMIINTRIN_H_INCLUDED
#define _AVX512VBMIINTRIN_H_INCLUDED
#ifndef __AVX512VBMI__
#pragma GCC push_options
#pragma GCC target("avx512vbmi")
#define __DISABLE_AVX512VBMI__
#endif /* __AVX512VBMI__ */
__funline __m512i _mm512_mask_multishift_epi64_epi8(__m512i __W, __mmask64 __M,
                                                    __m512i __X, __m512i __Y) {
  return (__m512i)__builtin_ia32_vpmultishiftqb512_mask(
      (__v64qi)__X, (__v64qi)__Y, (__v64qi)__W, (__mmask64)__M);
}
__funline __m512i _mm512_maskz_multishift_epi64_epi8(__mmask64 __M, __m512i __X,
                                                     __m512i __Y) {
  return (__m512i)__builtin_ia32_vpmultishiftqb512_mask(
      (__v64qi)__X, (__v64qi)__Y, (__v64qi)_mm512_setzero_si512(),
      (__mmask64)__M);
}
__funline __m512i _mm512_multishift_epi64_epi8(__m512i __X, __m512i __Y) {
  return (__m512i)__builtin_ia32_vpmultishiftqb512_mask(
      (__v64qi)__X, (__v64qi)__Y, (__v64qi)_mm512_undefined_epi32(),
      (__mmask64)-1);
}
__funline __m512i _mm512_permutexvar_epi8(__m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_permvarqi512_mask(
      (__v64qi)__B, (__v64qi)__A, (__v64qi)_mm512_undefined_epi32(),
      (__mmask64)-1);
}
__funline __m512i _mm512_maskz_permutexvar_epi8(__mmask64 __M, __m512i __A,
                                                __m512i __B) {
  return (__m512i)__builtin_ia32_permvarqi512_mask(
      (__v64qi)__B, (__v64qi)__A, (__v64qi)_mm512_setzero_si512(),
      (__mmask64)__M);
}
__funline __m512i _mm512_mask_permutexvar_epi8(__m512i __W, __mmask64 __M,
                                               __m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_permvarqi512_mask(
      (__v64qi)__B, (__v64qi)__A, (__v64qi)__W, (__mmask64)__M);
}
__funline __m512i _mm512_permutex2var_epi8(__m512i __A, __m512i __I,
                                           __m512i __B) {
  return (__m512i)__builtin_ia32_vpermt2varqi512_mask(
      (__v64qi)__I /* idx */, (__v64qi)__A, (__v64qi)__B, (__mmask64)-1);
}
__funline __m512i _mm512_mask_permutex2var_epi8(__m512i __A, __mmask64 __U,
                                                __m512i __I, __m512i __B) {
  return (__m512i)__builtin_ia32_vpermt2varqi512_mask(
      (__v64qi)__I /* idx */, (__v64qi)__A, (__v64qi)__B, (__mmask64)__U);
}
__funline __m512i _mm512_mask2_permutex2var_epi8(__m512i __A, __m512i __I,
                                                 __mmask64 __U, __m512i __B) {
  return (__m512i)__builtin_ia32_vpermi2varqi512_mask(
      (__v64qi)__A, (__v64qi)__I /* idx */, (__v64qi)__B, (__mmask64)__U);
}
__funline __m512i _mm512_maskz_permutex2var_epi8(__mmask64 __U, __m512i __A,
                                                 __m512i __I, __m512i __B) {
  return (__m512i)__builtin_ia32_vpermt2varqi512_maskz(
      (__v64qi)__I /* idx */, (__v64qi)__A, (__v64qi)__B, (__mmask64)__U);
}
#ifdef __DISABLE_AVX512VBMI__
#undef __DISABLE_AVX512VBMI__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512VBMI__ */
#endif /* _AVX512VBMIINTRIN_H_INCLUDED */
3,426
91
jart/cosmopolitan
false
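VPERMB (_mm512_permutexvar_epi8) selects every destination byte by an index vector, so a full 64-byte reversal is a single shuffle. A sketch assuming the AVX-512F load helper from immintrin.h; reverse_bytes is a hypothetical name (compile with -mavx512vbmi):

#include <immintrin.h>

__m512i reverse_bytes(__m512i v) {
  char idx[64];
  for (int i = 0; i < 64; ++i) idx[i] = (char)(63 - i);  /* dst[i] = v[63-i] */
  return _mm512_permutexvar_epi8(_mm512_loadu_si512(idx), v);
}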
cosmopolitan/third_party/intel/movdirintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED
#error "Never use <movdirintrin.h> directly; include <x86intrin.h> instead."
#endif
#ifndef _MOVDIRINTRIN_H_INCLUDED
#define _MOVDIRINTRIN_H_INCLUDED
#ifndef __MOVDIRI__
#pragma GCC push_options
#pragma GCC target("movdiri")
#define __DISABLE_MOVDIRI__
#endif /* __MOVDIRI__ */
__funline void _directstoreu_u32(void *__P, unsigned int __A) {
  __builtin_ia32_directstoreu_u32((unsigned int *)__P, __A);
}
#ifdef __x86_64__
__funline void _directstoreu_u64(void *__P, unsigned long long __A) {
  __builtin_ia32_directstoreu_u64((unsigned long long *)__P, __A);
}
#endif
#ifdef __DISABLE_MOVDIRI__
#undef __DISABLE_MOVDIRI__
#pragma GCC pop_options
#endif /* __DISABLE_MOVDIRI__ */
#ifndef __MOVDIR64B__
#pragma GCC push_options
#pragma GCC target("movdir64b")
#define __DISABLE_MOVDIR64B__
#endif /* __MOVDIR64B__ */
__funline void _movdir64b(void *__P, const void *__Q) {
  __builtin_ia32_movdir64b(__P, __Q);
}
#ifdef __DISABLE_MOVDIR64B__
#undef __DISABLE_MOVDIR64B__
#pragma GCC pop_options
#endif /* __DISABLE_MOVDIR64B__ */
#endif /* _MOVDIRINTRIN_H_INCLUDED. */
1,111
43
jart/cosmopolitan
false
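MOVDIR64B performs a 64-byte write-atomic direct store and MOVDIRI a 4-byte one, which together form the usual doorbell pattern for device work submission. A hedged sketch; the portal/doorbell names are hypothetical (compile with -mmovdiri -mmovdir64b):

#include <x86intrin.h>

void post_descriptor(void *portal, const void *desc, unsigned int *doorbell,
                     unsigned int tag) {
  _movdir64b(portal, desc);          /* 64-byte write-atomic direct store */
  _directstoreu_u32(doorbell, tag);  /* 4-byte direct store rings the bell */
}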
cosmopolitan/third_party/intel/xsavesintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED
#error "Never use <xsavesintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef _XSAVESINTRIN_H_INCLUDED
#define _XSAVESINTRIN_H_INCLUDED
#ifndef __XSAVES__
#pragma GCC push_options
#pragma GCC target("xsaves")
#define __DISABLE_XSAVES__
#endif /* __XSAVES__ */
__funline void _xsaves(void *__P, long long __M) {
  __builtin_ia32_xsaves(__P, __M);
}
__funline void _xrstors(void *__P, long long __M) {
  __builtin_ia32_xrstors(__P, __M);
}
#ifdef __x86_64__
__funline void _xrstors64(void *__P, long long __M) {
  __builtin_ia32_xrstors64(__P, __M);
}
__funline void _xsaves64(void *__P, long long __M) {
  __builtin_ia32_xsaves64(__P, __M);
}
#endif
#ifdef __DISABLE_XSAVES__
#undef __DISABLE_XSAVES__
#pragma GCC pop_options
#endif /* __DISABLE_XSAVES__ */
#endif /* _XSAVESINTRIN_H_INCLUDED */
852
38
jart/cosmopolitan
false
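XSAVES/XRSTORS are supervisor-only instructions, so this header is really for kernel-mode code. A sketch of how a kernel might save and restore a task's extended state; the area size is illustrative only (real code sizes it via CPUID leaf 0xD), and the buffer must be 64-byte aligned and initially zeroed:

#include <immintrin.h>

static char xsave_area[4096] __attribute__((aligned(64)));

void save_ext_state(void) {
  _xsaves(xsave_area, ~0LL);  /* save all enabled state components */
}

void restore_ext_state(void) {
  _xrstors(xsave_area, ~0LL);
}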
cosmopolitan/third_party/intel/xsaveintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED
#error "Never use <xsaveintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef _XSAVEINTRIN_H_INCLUDED
#define _XSAVEINTRIN_H_INCLUDED
#ifndef __XSAVE__
#pragma GCC push_options
#pragma GCC target("xsave")
#define __DISABLE_XSAVE__
#endif /* __XSAVE__ */
__funline void _xsave(void *__P, long long __M) {
  __builtin_ia32_xsave(__P, __M);
}
__funline void _xrstor(void *__P, long long __M) {
  __builtin_ia32_xrstor(__P, __M);
}
__funline void _xsetbv(unsigned int __A, long long __V) {
  __builtin_ia32_xsetbv(__A, __V);
}
__funline long long _xgetbv(unsigned int __A) {
  return __builtin_ia32_xgetbv(__A);
}
#ifdef __x86_64__
__funline void _xsave64(void *__P, long long __M) {
  __builtin_ia32_xsave64(__P, __M);
}
__funline void _xrstor64(void *__P, long long __M) {
  __builtin_ia32_xrstor64(__P, __M);
}
#endif
#ifdef __DISABLE_XSAVE__
#undef __DISABLE_XSAVE__
#pragma GCC pop_options
#endif /* __DISABLE_XSAVE__ */
#endif /* _XSAVEINTRIN_H_INCLUDED */
1,017
46
jart/cosmopolitan
false
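Unlike the save/restore entry points, _xgetbv is useful from ring 3: reading XCR0 is the standard check that the OS actually context-switches ZMM state before using AVX-512. A sketch; the 0xe6 mask covers the SSE, AVX, opmask, ZMM_Hi256 and Hi16_ZMM bits:

#include <immintrin.h>

int os_supports_avx512(void) {
  long long xcr0 = _xgetbv(0);   /* XCR0: OS-enabled state components */
  return (xcr0 & 0xe6) == 0xe6;  /* bits 1,2,5,6,7 must all be set */
}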
cosmopolitan/third_party/intel/clzerointrin.internal.h
#ifndef _CLZEROINTRIN_H_INCLUDED
#define _CLZEROINTRIN_H_INCLUDED
#ifdef __x86_64__
#ifndef __CLZERO__
#pragma GCC push_options
#pragma GCC target("clzero")
#define __DISABLE_CLZERO__
#endif /* __CLZERO__ */
__funline void _mm_clzero(void* __I) {
  __builtin_ia32_clzero(__I);
}
#ifdef __DISABLE_CLZERO__
#undef __DISABLE_CLZERO__
#pragma GCC pop_options
#endif /* __DISABLE_CLZERO__ */
#endif /* __x86_64__ */
#endif /* _CLZEROINTRIN_H_INCLUDED */
453
22
jart/cosmopolitan
false
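CLZERO zeroes the entire cache line containing the given address without reading it first. A sketch that assumes 64-byte lines and a line-aligned buffer; real code should take the line size from CPUID:

#include <x86intrin.h>

void zero_buffer(void *buf, unsigned long n) {  /* buf 64-byte aligned */
  char *p = (char *)buf;
  for (unsigned long i = 0; i < n; i += 64) {
    _mm_clzero(p + i);  /* zero the whole line containing p+i */
  }
}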
cosmopolitan/third_party/intel/bmi2intrin.internal.h
#if !defined _X86INTRIN_H_INCLUDED && !defined _IMMINTRIN_H_INCLUDED
#error "Never use <bmi2intrin.h> directly; include <x86intrin.h> instead."
#endif
#ifndef _BMI2INTRIN_H_INCLUDED
#define _BMI2INTRIN_H_INCLUDED
#ifndef __BMI2__
#pragma GCC push_options
#pragma GCC target("bmi2")
#define __DISABLE_BMI2__
#endif /* __BMI2__ */
__funline unsigned int _bzhi_u32(unsigned int __X, unsigned int __Y) {
  return __builtin_ia32_bzhi_si(__X, __Y);
}
__funline unsigned int _pdep_u32(unsigned int __X, unsigned int __Y) {
  return __builtin_ia32_pdep_si(__X, __Y);
}
__funline unsigned int _pext_u32(unsigned int __X, unsigned int __Y) {
  return __builtin_ia32_pext_si(__X, __Y);
}
#ifdef __x86_64__
__funline unsigned long long _bzhi_u64(unsigned long long __X,
                                       unsigned long long __Y) {
  return __builtin_ia32_bzhi_di(__X, __Y);
}
__funline unsigned long long _pdep_u64(unsigned long long __X,
                                       unsigned long long __Y) {
  return __builtin_ia32_pdep_di(__X, __Y);
}
__funline unsigned long long _pext_u64(unsigned long long __X,
                                       unsigned long long __Y) {
  return __builtin_ia32_pext_di(__X, __Y);
}
__funline unsigned long long _mulx_u64(unsigned long long __X,
                                       unsigned long long __Y,
                                       unsigned long long *__P) {
  unsigned __int128 __res = (unsigned __int128)__X * __Y;
  *__P = (unsigned long long)(__res >> 64);
  return (unsigned long long)__res;
}
#else /* !__x86_64__ */
__funline unsigned int _mulx_u32(unsigned int __X, unsigned int __Y,
                                 unsigned int *__P) {
  unsigned long long __res = (unsigned long long)__X * __Y;
  *__P = (unsigned int)(__res >> 32);
  return (unsigned int)__res;
}
#endif /* !__x86_64__ */
#ifdef __DISABLE_BMI2__
#undef __DISABLE_BMI2__
#pragma GCC pop_options
#endif /* __DISABLE_BMI2__ */
#endif /* _BMI2INTRIN_H_INCLUDED */
1,990
68
jart/cosmopolitan
false
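PDEP scatters source bits to the positions selected by a mask and PEXT gathers them back, which makes 2-D Morton (Z-order) coding nearly free. A sketch; morton2d/morton2d_x are hypothetical names (compile with -mbmi2):

#include <immintrin.h>

unsigned int morton2d(unsigned int x, unsigned int y) {
  return _pdep_u32(x, 0x55555555u)   /* x bits -> even positions */
       | _pdep_u32(y, 0xaaaaaaaau);  /* y bits -> odd positions  */
}

unsigned int morton2d_x(unsigned int code) {
  return _pext_u32(code, 0x55555555u);  /* gather the even bits back */
}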
cosmopolitan/third_party/intel/avx512ifmaintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED
#error "Never use <avx512ifmaintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef _AVX512IFMAINTRIN_H_INCLUDED
#define _AVX512IFMAINTRIN_H_INCLUDED
#ifndef __AVX512IFMA__
#pragma GCC push_options
#pragma GCC target("avx512ifma")
#define __DISABLE_AVX512IFMA__
#endif /* __AVX512IFMA__ */
__funline __m512i _mm512_madd52lo_epu64(__m512i __X, __m512i __Y, __m512i __Z) {
  return (__m512i)__builtin_ia32_vpmadd52luq512_mask(
      (__v8di)__X, (__v8di)__Y, (__v8di)__Z, (__mmask8)-1);
}
__funline __m512i _mm512_madd52hi_epu64(__m512i __X, __m512i __Y, __m512i __Z) {
  return (__m512i)__builtin_ia32_vpmadd52huq512_mask(
      (__v8di)__X, (__v8di)__Y, (__v8di)__Z, (__mmask8)-1);
}
__funline __m512i _mm512_mask_madd52lo_epu64(__m512i __W, __mmask8 __M,
                                             __m512i __X, __m512i __Y) {
  return (__m512i)__builtin_ia32_vpmadd52luq512_mask(
      (__v8di)__W, (__v8di)__X, (__v8di)__Y, (__mmask8)__M);
}
__funline __m512i _mm512_mask_madd52hi_epu64(__m512i __W, __mmask8 __M,
                                             __m512i __X, __m512i __Y) {
  return (__m512i)__builtin_ia32_vpmadd52huq512_mask(
      (__v8di)__W, (__v8di)__X, (__v8di)__Y, (__mmask8)__M);
}
__funline __m512i _mm512_maskz_madd52lo_epu64(__mmask8 __M, __m512i __X,
                                              __m512i __Y, __m512i __Z) {
  return (__m512i)__builtin_ia32_vpmadd52luq512_maskz(
      (__v8di)__X, (__v8di)__Y, (__v8di)__Z, (__mmask8)__M);
}
__funline __m512i _mm512_maskz_madd52hi_epu64(__mmask8 __M, __m512i __X,
                                              __m512i __Y, __m512i __Z) {
  return (__m512i)__builtin_ia32_vpmadd52huq512_maskz(
      (__v8di)__X, (__v8di)__Y, (__v8di)__Z, (__mmask8)__M);
}
#ifdef __DISABLE_AVX512IFMA__
#undef __DISABLE_AVX512IFMA__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512IFMA__ */
#endif /* _AVX512IFMAINTRIN_H_INCLUDED */
2,036
54
jart/cosmopolitan
false
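VPMADD52 accumulates the low or high 52 bits of a 52x52-bit product into 64-bit lanes, the core step of radix-2^52 big-integer multiplication. A sketch assuming the inputs are already reduced below 2^52; madd52 is a hypothetical helper (compile with -mavx512ifma):

#include <immintrin.h>

void madd52(__m512i *lo, __m512i *hi, __m512i a, __m512i b) {
  *lo = _mm512_madd52lo_epu64(*lo, a, b);  /* acc += low 52 bits of a*b  */
  *hi = _mm512_madd52hi_epu64(*hi, a, b);  /* acc += high 52 bits of a*b */
}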
cosmopolitan/third_party/intel/pmmintrin.internal.h
#ifndef _PMMINTRIN_H_INCLUDED
#define _PMMINTRIN_H_INCLUDED
#ifdef __x86_64__
#include "third_party/intel/emmintrin.internal.h"
#ifndef __SSE3__
#pragma GCC push_options
#pragma GCC target("sse3")
#define __DISABLE_SSE3__
#endif /* __SSE3__ */
#define _MM_DENORMALS_ZERO_MASK 0x0040
#define _MM_DENORMALS_ZERO_ON 0x0040
#define _MM_DENORMALS_ZERO_OFF 0x0000
#define _MM_SET_DENORMALS_ZERO_MODE(mode) \
  _mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (mode))
#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
__funline __m128 _mm_addsub_ps(__m128 __X, __m128 __Y) {
  return (__m128)__builtin_ia32_addsubps((__v4sf)__X, (__v4sf)__Y);
}
__funline __m128 _mm_hadd_ps(__m128 __X, __m128 __Y) {
  return (__m128)__builtin_ia32_haddps((__v4sf)__X, (__v4sf)__Y);
}
__funline __m128 _mm_hsub_ps(__m128 __X, __m128 __Y) {
  return (__m128)__builtin_ia32_hsubps((__v4sf)__X, (__v4sf)__Y);
}
__funline __m128 _mm_movehdup_ps(__m128 __X) {
  return (__m128)__builtin_ia32_movshdup((__v4sf)__X);
}
__funline __m128 _mm_moveldup_ps(__m128 __X) {
  return (__m128)__builtin_ia32_movsldup((__v4sf)__X);
}
__funline __m128d _mm_addsub_pd(__m128d __X, __m128d __Y) {
  return (__m128d)__builtin_ia32_addsubpd((__v2df)__X, (__v2df)__Y);
}
__funline __m128d _mm_hadd_pd(__m128d __X, __m128d __Y) {
  return (__m128d)__builtin_ia32_haddpd((__v2df)__X, (__v2df)__Y);
}
__funline __m128d _mm_hsub_pd(__m128d __X, __m128d __Y) {
  return (__m128d)__builtin_ia32_hsubpd((__v2df)__X, (__v2df)__Y);
}
__funline __m128d _mm_loaddup_pd(double const *__P) {
  return _mm_load1_pd(__P);
}
__funline __m128d _mm_movedup_pd(__m128d __X) {
  return _mm_shuffle_pd(__X, __X, _MM_SHUFFLE2(0, 0));
}
__funline __m128i _mm_lddqu_si128(__m128i const *__P) {
  return (__m128i)__builtin_ia32_lddqu((char const *)__P);
}
__funline void _mm_monitor(void const *__P, unsigned int __E, unsigned int __H) {
  __builtin_ia32_monitor(__P, __E, __H);
}
__funline void _mm_mwait(unsigned int __E, unsigned int __H) {
  __builtin_ia32_mwait(__E, __H);
}
#ifdef __DISABLE_SSE3__
#undef __DISABLE_SSE3__
#pragma GCC pop_options
#endif /* __DISABLE_SSE3__ */
#endif /* __x86_64__ */
#endif /* _PMMINTRIN_H_INCLUDED */
2,224
79
jart/cosmopolitan
false
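HADDPS adds horizontally adjacent pairs, so two applications collapse a 4-float vector into its total in every lane. A sketch; sum4 is a hypothetical helper, and _mm_cvtss_f32 comes from the SSE header (compile with -msse3):

#include <pmmintrin.h>

float sum4(__m128 v) {           /* v = {a, b, c, d} */
  __m128 t = _mm_hadd_ps(v, v);  /* {a+b, c+d, a+b, c+d} */
  t = _mm_hadd_ps(t, t);         /* {a+b+c+d, ...}       */
  return _mm_cvtss_f32(t);
}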
cosmopolitan/third_party/intel/f16cintrin.internal.h
#if !defined _X86INTRIN_H_INCLUDED && !defined _IMMINTRIN_H_INCLUDED
#error \
    "Never use <f16cintrin.h> directly; include <x86intrin.h> or <immintrin.h> instead."
#endif
#ifndef _F16CINTRIN_H_INCLUDED
#define _F16CINTRIN_H_INCLUDED
#ifndef __F16C__
#pragma GCC push_options
#pragma GCC target("f16c")
#define __DISABLE_F16C__
#endif /* __F16C__ */
__funline float _cvtsh_ss(unsigned short __S) {
  __v8hi __H = __extension__(__v8hi){(short)__S, 0, 0, 0, 0, 0, 0, 0};
  __v4sf __A = __builtin_ia32_vcvtph2ps(__H);
  return __builtin_ia32_vec_ext_v4sf(__A, 0);
}
/**
 * Converts four half-precision (16-bit) floating point values to
 * single-precision floating point values.
 */
__funline __m128 _mm_cvtph_ps(__m128i __A) {
  return (__m128)__builtin_ia32_vcvtph2ps((__v8hi)__A);
}
/**
 * Converts eight half-precision (16-bit) floating point values to
 * single-precision floating point values.
 */
__funline __m256 _mm256_cvtph_ps(__m128i __A) {
  return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__A);
}
#ifdef __OPTIMIZE__
__funline unsigned short _cvtss_sh(float __F, const int __I) {
  __v4sf __A = __extension__(__v4sf){__F, 0, 0, 0};
  __v8hi __H = __builtin_ia32_vcvtps2ph(__A, __I);
  return (unsigned short)__builtin_ia32_vec_ext_v8hi(__H, 0);
}
__funline __m128i _mm_cvtps_ph(__m128 __A, const int __I) {
  return (__m128i)__builtin_ia32_vcvtps2ph((__v4sf)__A, __I);
}
/**
 * Converts eight single-precision floating point values to
 * half-precision (16-bit) floating point values.
 */
__funline __m128i _mm256_cvtps_ph(__m256 __A, const int __I) {
  return (__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)__A, __I);
}
#else
#define _cvtss_sh(__F, __I)                              \
  (__extension__({                                       \
    __v4sf __A = __extension__(__v4sf){__F, 0, 0, 0};    \
    __v8hi __H = __builtin_ia32_vcvtps2ph(__A, __I);     \
    (unsigned short)__builtin_ia32_vec_ext_v8hi(__H, 0); \
  }))
#define _mm_cvtps_ph(A, I) \
  ((__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)A, (int)(I)))
#define _mm256_cvtps_ph(A, I) \
  ((__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)A, (int)(I)))
#endif /* __OPTIMIZE */
#ifdef __DISABLE_F16C__
#undef __DISABLE_F16C__
#pragma GCC pop_options
#endif /* __DISABLE_F16C__ */
#endif /* _F16CINTRIN_H_INCLUDED */
2,312
76
jart/cosmopolitan
false
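A scalar float survives a round trip through binary16 whenever it is representable in half precision; immediate 0 selects round-to-nearest-even for the downward conversion. A sketch; through_fp16 is a hypothetical helper (compile with -mf16c):

#include <immintrin.h>

float through_fp16(float x) {
  unsigned short h = _cvtss_sh(x, 0);  /* float -> IEEE binary16 */
  return _cvtsh_ss(h);                 /* binary16 -> float      */
}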
cosmopolitan/third_party/intel/clflushoptintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED
#error "Never use <clflushoptintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef _CLFLUSHOPTINTRIN_H_INCLUDED
#define _CLFLUSHOPTINTRIN_H_INCLUDED
#ifndef __CLFLUSHOPT__
#pragma GCC push_options
#pragma GCC target("clflushopt")
#define __DISABLE_CLFLUSHOPT__
#endif /* __CLFLUSHOPT__ */
__funline void _mm_clflushopt(void *__A) {
  __builtin_ia32_clflushopt(__A);
}
#ifdef __DISABLE_CLFLUSHOPT__
#undef __DISABLE_CLFLUSHOPT__
#pragma GCC pop_options
#endif /* __DISABLE_CLFLUSHOPT__ */
#endif /* _CLFLUSHOPTINTRIN_H_INCLUDED */
583
24
jart/cosmopolitan
false
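CLFLUSHOPT is weakly ordered, so a flush loop normally ends with a fence. A sketch that flushes every cache line of a buffer, assuming 64-byte lines; flush_range is a hypothetical helper, and _mm_sfence comes from the SSE header (compile with -mclflushopt):

#include <immintrin.h>

void flush_range(void *buf, unsigned long len) {
  char *p = (char *)((unsigned long)buf & ~63ul);  /* align down to a line */
  char *end = (char *)buf + len;
  for (; p < end; p += 64) _mm_clflushopt(p);
  _mm_sfence();  /* order the flushes before subsequent stores */
}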
cosmopolitan/third_party/intel/mm3dnow.internal.h
#ifndef _MM3DNOW_H_INCLUDED
#define _MM3DNOW_H_INCLUDED
#ifdef __x86_64__
#include "third_party/intel/mmintrin.internal.h"
#include "third_party/intel/prfchwintrin.internal.h"
#if defined __x86_64__ && !defined __SSE__ || !defined __3dNOW__
#pragma GCC push_options
#ifdef __x86_64__
#pragma GCC target("sse,3dnow")
#else
#pragma GCC target("3dnow")
#endif
#define __DISABLE_3dNOW__
#endif /* __3dNOW__ */
__funline void _m_femms(void) {
  __builtin_ia32_femms();
}
__funline __m64 _m_pavgusb(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pavgusb((__v8qi)__A, (__v8qi)__B);
}
__funline __m64 _m_pf2id(__m64 __A) {
  return (__m64)__builtin_ia32_pf2id((__v2sf)__A);
}
__funline __m64 _m_pfacc(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pfacc((__v2sf)__A, (__v2sf)__B);
}
__funline __m64 _m_pfadd(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pfadd((__v2sf)__A, (__v2sf)__B);
}
__funline __m64 _m_pfcmpeq(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pfcmpeq((__v2sf)__A, (__v2sf)__B);
}
__funline __m64 _m_pfcmpge(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pfcmpge((__v2sf)__A, (__v2sf)__B);
}
__funline __m64 _m_pfcmpgt(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pfcmpgt((__v2sf)__A, (__v2sf)__B);
}
__funline __m64 _m_pfmax(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pfmax((__v2sf)__A, (__v2sf)__B);
}
__funline __m64 _m_pfmin(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pfmin((__v2sf)__A, (__v2sf)__B);
}
__funline __m64 _m_pfmul(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pfmul((__v2sf)__A, (__v2sf)__B);
}
__funline __m64 _m_pfrcp(__m64 __A) {
  return (__m64)__builtin_ia32_pfrcp((__v2sf)__A);
}
__funline __m64 _m_pfrcpit1(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pfrcpit1((__v2sf)__A, (__v2sf)__B);
}
__funline __m64 _m_pfrcpit2(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pfrcpit2((__v2sf)__A, (__v2sf)__B);
}
__funline __m64 _m_pfrsqrt(__m64 __A) {
  return (__m64)__builtin_ia32_pfrsqrt((__v2sf)__A);
}
__funline __m64 _m_pfrsqit1(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pfrsqit1((__v2sf)__A, (__v2sf)__B);
}
__funline __m64 _m_pfsub(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pfsub((__v2sf)__A, (__v2sf)__B);
}
__funline __m64 _m_pfsubr(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pfsubr((__v2sf)__A, (__v2sf)__B);
}
__funline __m64 _m_pi2fd(__m64 __A) {
  return (__m64)__builtin_ia32_pi2fd((__v2si)__A);
}
__funline __m64 _m_pmulhrw(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pmulhrw((__v4hi)__A, (__v4hi)__B);
}
__funline void _m_prefetch(void *__P) {
  __builtin_prefetch(__P, 0, 3 /* _MM_HINT_T0 */);
}
__funline __m64 _m_from_float(float __A) {
  return __extension__(__m64)(__v2sf){__A, 0.0f};
}
__funline float _m_to_float(__m64 __A) {
  union {
    __v2sf v;
    float a[2];
  } __tmp;
  __tmp.v = (__v2sf)__A;
  return __tmp.a[0];
}
#ifdef __DISABLE_3dNOW__
#undef __DISABLE_3dNOW__
#pragma GCC pop_options
#endif /* __DISABLE_3dNOW__ */
#if defined __x86_64__ && !defined __SSE__ || !defined __3dNOW_A__
#pragma GCC push_options
#ifdef __x86_64__
#pragma GCC target("sse,3dnowa")
#else
#pragma GCC target("3dnowa")
#endif
#define __DISABLE_3dNOW_A__
#endif /* __3dNOW_A__ */
__funline __m64 _m_pf2iw(__m64 __A) {
  return (__m64)__builtin_ia32_pf2iw((__v2sf)__A);
}
__funline __m64 _m_pfnacc(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pfnacc((__v2sf)__A, (__v2sf)__B);
}
__funline __m64 _m_pfpnacc(__m64 __A, __m64 __B) {
  return (__m64)__builtin_ia32_pfpnacc((__v2sf)__A, (__v2sf)__B);
}
__funline __m64 _m_pi2fw(__m64 __A) {
  return (__m64)__builtin_ia32_pi2fw((__v2si)__A);
}
__funline __m64 _m_pswapd(__m64 __A) {
  return (__m64)__builtin_ia32_pswapdsf((__v2sf)__A);
}
#ifdef __DISABLE_3dNOW_A__
#undef __DISABLE_3dNOW_A__
#pragma GCC pop_options
#endif /* __DISABLE_3dNOW_A__ */
#endif /* __x86_64__ */
#endif /* _MM3DNOW_H_INCLUDED */
3,984
156
jart/cosmopolitan
false
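The 3DNow! intrinsics operate on two packed floats in an MMX register, and _m_femms must be issued before returning to ordinary floating-point code. A sketch using only the helpers above; pfadd_low is a hypothetical name, and the ISA is AMD-only (compile with -m3dnow):

#include <mm3dnow.h>

float pfadd_low(float a, float b) {
  __m64 r = _m_pfadd(_m_from_float(a), _m_from_float(b));
  float out = _m_to_float(r);  /* low lane holds a + b */
  _m_femms();                  /* leave the MMX/3DNow! register state */
  return out;
}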
cosmopolitan/third_party/intel/avx512vlintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #error "Never use <avx512vlintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _AVX512VLINTRIN_H_INCLUDED #define _AVX512VLINTRIN_H_INCLUDED #ifndef __AVX512VL__ #pragma GCC push_options #pragma GCC target("avx512vl") #define __DISABLE_AVX512VL__ #endif /* __AVX512VL__ */ typedef unsigned int __mmask32; __funline __m256d _mm256_mask_mov_pd(__m256d __W, __mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_movapd256_mask((__v4df)__A, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_mov_pd(__mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_movapd256_mask( (__v4df)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m128d _mm_mask_mov_pd(__m128d __W, __mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_movapd128_mask((__v2df)__A, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_mov_pd(__mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_movapd128_mask( (__v2df)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m256d _mm256_mask_load_pd(__m256d __W, __mmask8 __U, void const *__P) { return (__m256d)__builtin_ia32_loadapd256_mask((__v4df *)__P, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_load_pd(__mmask8 __U, void const *__P) { return (__m256d)__builtin_ia32_loadapd256_mask( (__v4df *)__P, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m128d _mm_mask_load_pd(__m128d __W, __mmask8 __U, void const *__P) { return (__m128d)__builtin_ia32_loadapd128_mask((__v2df *)__P, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_load_pd(__mmask8 __U, void const *__P) { return (__m128d)__builtin_ia32_loadapd128_mask( (__v2df *)__P, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline void _mm256_mask_store_pd(void *__P, __mmask8 __U, __m256d __A) { __builtin_ia32_storeapd256_mask((__v4df *)__P, (__v4df)__A, (__mmask8)__U); } __funline void _mm_mask_store_pd(void *__P, __mmask8 __U, __m128d __A) { __builtin_ia32_storeapd128_mask((__v2df *)__P, (__v2df)__A, (__mmask8)__U); } __funline __m256 _mm256_mask_mov_ps(__m256 __W, __mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_movaps256_mask((__v8sf)__A, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_mov_ps(__mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_movaps256_mask( (__v8sf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_mask_mov_ps(__m128 __W, __mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_movaps128_mask((__v4sf)__A, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_mov_ps(__mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_movaps128_mask( (__v4sf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m256 _mm256_mask_load_ps(__m256 __W, __mmask8 __U, void const *__P) { return (__m256)__builtin_ia32_loadaps256_mask((__v8sf *)__P, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_load_ps(__mmask8 __U, void const *__P) { return (__m256)__builtin_ia32_loadaps256_mask( (__v8sf *)__P, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_mask_load_ps(__m128 __W, __mmask8 __U, void const *__P) { return (__m128)__builtin_ia32_loadaps128_mask((__v4sf *)__P, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_load_ps(__mmask8 __U, void const *__P) { return (__m128)__builtin_ia32_loadaps128_mask( (__v4sf *)__P, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline void _mm256_mask_store_ps(void *__P, __mmask8 __U, __m256 __A) { __builtin_ia32_storeaps256_mask((__v8sf *)__P, (__v8sf)__A, (__mmask8)__U); 
} __funline void _mm_mask_store_ps(void *__P, __mmask8 __U, __m128 __A) { __builtin_ia32_storeaps128_mask((__v4sf *)__P, (__v4sf)__A, (__mmask8)__U); } __funline __m256i _mm256_mask_mov_epi64(__m256i __W, __mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_movdqa64_256_mask((__v4di)__A, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_mov_epi64(__mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_movdqa64_256_mask( (__v4di)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_mov_epi64(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_movdqa64_128_mask((__v2di)__A, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_mov_epi64(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_movdqa64_128_mask( (__v2di)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_load_epi64(__m256i __W, __mmask8 __U, void const *__P) { return (__m256i)__builtin_ia32_movdqa64load256_mask( (__v4di *)__P, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_load_epi64(__mmask8 __U, void const *__P) { return (__m256i)__builtin_ia32_movdqa64load256_mask( (__v4di *)__P, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_load_epi64(__m128i __W, __mmask8 __U, void const *__P) { return (__m128i)__builtin_ia32_movdqa64load128_mask( (__v2di *)__P, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_load_epi64(__mmask8 __U, void const *__P) { return (__m128i)__builtin_ia32_movdqa64load128_mask( (__v2di *)__P, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline void _mm256_mask_store_epi64(void *__P, __mmask8 __U, __m256i __A) { __builtin_ia32_movdqa64store256_mask((__v4di *)__P, (__v4di)__A, (__mmask8)__U); } __funline void _mm_mask_store_epi64(void *__P, __mmask8 __U, __m128i __A) { __builtin_ia32_movdqa64store128_mask((__v2di *)__P, (__v2di)__A, (__mmask8)__U); } __funline __m256i _mm256_mask_mov_epi32(__m256i __W, __mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_movdqa32_256_mask((__v8si)__A, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_mov_epi32(__mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_movdqa32_256_mask( (__v8si)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_mov_epi32(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_movdqa32_128_mask((__v4si)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_mov_epi32(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_movdqa32_128_mask( (__v4si)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_load_epi32(__m256i __W, __mmask8 __U, void const *__P) { return (__m256i)__builtin_ia32_movdqa32load256_mask( (__v8si *)__P, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_load_epi32(__mmask8 __U, void const *__P) { return (__m256i)__builtin_ia32_movdqa32load256_mask( (__v8si *)__P, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_load_epi32(__m128i __W, __mmask8 __U, void const *__P) { return (__m128i)__builtin_ia32_movdqa32load128_mask( (__v4si *)__P, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_load_epi32(__mmask8 __U, void const *__P) { return (__m128i)__builtin_ia32_movdqa32load128_mask( (__v4si *)__P, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline void _mm256_mask_store_epi32(void *__P, __mmask8 __U, __m256i __A) { __builtin_ia32_movdqa32store256_mask((__v8si *)__P, (__v8si)__A, (__mmask8)__U); } 
__funline void _mm_mask_store_epi32(void *__P, __mmask8 __U, __m128i __A) { __builtin_ia32_movdqa32store128_mask((__v4si *)__P, (__v4si)__A, (__mmask8)__U); } __funline __m128d _mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_addpd128_mask((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_addpd128_mask( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m256d _mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_addpd256_mask((__v4df)__A, (__v4df)__B, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_addpd256_mask( (__v4df)__A, (__v4df)__B, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m128 _mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_addps128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_addps128_mask( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m256 _mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { return (__m256)__builtin_ia32_addps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B) { return (__m256)__builtin_ia32_addps256_mask( (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128d _mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_subpd128_mask((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_subpd128_mask( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m256d _mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_subpd256_mask((__v4df)__A, (__v4df)__B, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_subpd256_mask( (__v4df)__A, (__v4df)__B, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m128 _mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_subps128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_subps128_mask( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m256 _mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { return (__m256)__builtin_ia32_subps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B) { return (__m256)__builtin_ia32_subps256_mask( (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline void _mm256_store_epi64(void *__P, __m256i __A) { *(__m256i *)__P = __A; } __funline void _mm_store_epi64(void *__P, __m128i __A) { *(__m128i *)__P = __A; } __funline __m256d _mm256_mask_loadu_pd(__m256d __W, __mmask8 __U, void const *__P) { return (__m256d)__builtin_ia32_loadupd256_mask((const double *)__P, 
(__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_loadu_pd(__mmask8 __U, void const *__P) { return (__m256d)__builtin_ia32_loadupd256_mask( (const double *)__P, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m128d _mm_mask_loadu_pd(__m128d __W, __mmask8 __U, void const *__P) { return (__m128d)__builtin_ia32_loadupd128_mask((const double *)__P, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_loadu_pd(__mmask8 __U, void const *__P) { return (__m128d)__builtin_ia32_loadupd128_mask( (const double *)__P, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline void _mm256_mask_storeu_pd(void *__P, __mmask8 __U, __m256d __A) { __builtin_ia32_storeupd256_mask((double *)__P, (__v4df)__A, (__mmask8)__U); } __funline void _mm_mask_storeu_pd(void *__P, __mmask8 __U, __m128d __A) { __builtin_ia32_storeupd128_mask((double *)__P, (__v2df)__A, (__mmask8)__U); } __funline __m256 _mm256_mask_loadu_ps(__m256 __W, __mmask8 __U, void const *__P) { return (__m256)__builtin_ia32_loadups256_mask((const float *)__P, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_loadu_ps(__mmask8 __U, void const *__P) { return (__m256)__builtin_ia32_loadups256_mask( (const float *)__P, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_mask_loadu_ps(__m128 __W, __mmask8 __U, void const *__P) { return (__m128)__builtin_ia32_loadups128_mask((const float *)__P, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_loadu_ps(__mmask8 __U, void const *__P) { return (__m128)__builtin_ia32_loadups128_mask( (const float *)__P, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline void _mm256_mask_storeu_ps(void *__P, __mmask8 __U, __m256 __A) { __builtin_ia32_storeups256_mask((float *)__P, (__v8sf)__A, (__mmask8)__U); } __funline void _mm_mask_storeu_ps(void *__P, __mmask8 __U, __m128 __A) { __builtin_ia32_storeups128_mask((float *)__P, (__v4sf)__A, (__mmask8)__U); } __funline __m256i _mm256_mask_loadu_epi64(__m256i __W, __mmask8 __U, void const *__P) { return (__m256i)__builtin_ia32_loaddqudi256_mask((const long long *)__P, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_loadu_epi64(__mmask8 __U, void const *__P) { return (__m256i)__builtin_ia32_loaddqudi256_mask( (const long long *)__P, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_loadu_epi64(__m128i __W, __mmask8 __U, void const *__P) { return (__m128i)__builtin_ia32_loaddqudi128_mask((const long long *)__P, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_loadu_epi64(__mmask8 __U, void const *__P) { return (__m128i)__builtin_ia32_loaddqudi128_mask( (const long long *)__P, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline void _mm256_mask_storeu_epi64(void *__P, __mmask8 __U, __m256i __A) { __builtin_ia32_storedqudi256_mask((long long *)__P, (__v4di)__A, (__mmask8)__U); } __funline void _mm_mask_storeu_epi64(void *__P, __mmask8 __U, __m128i __A) { __builtin_ia32_storedqudi128_mask((long long *)__P, (__v2di)__A, (__mmask8)__U); } __funline __m256i _mm256_mask_loadu_epi32(__m256i __W, __mmask8 __U, void const *__P) { return (__m256i)__builtin_ia32_loaddqusi256_mask((const int *)__P, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_loadu_epi32(__mmask8 __U, void const *__P) { return (__m256i)__builtin_ia32_loaddqusi256_mask( (const int *)__P, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_loadu_epi32(__m128i __W, __mmask8 __U, void const *__P) { return (__m128i)__builtin_ia32_loaddqusi128_mask((const int *)__P, 
(__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_loadu_epi32(__mmask8 __U, void const *__P) { return (__m128i)__builtin_ia32_loaddqusi128_mask( (const int *)__P, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline void _mm256_mask_storeu_epi32(void *__P, __mmask8 __U, __m256i __A) { __builtin_ia32_storedqusi256_mask((int *)__P, (__v8si)__A, (__mmask8)__U); } __funline void _mm_mask_storeu_epi32(void *__P, __mmask8 __U, __m128i __A) { __builtin_ia32_storedqusi128_mask((int *)__P, (__v4si)__A, (__mmask8)__U); } __funline __m256i _mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_pabsd256_mask((__v8si)__A, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_pabsd256_mask( (__v8si)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pabsd128_mask((__v4si)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_abs_epi32(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pabsd128_mask( (__v4si)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_abs_epi64(__m256i __A) { return (__m256i)__builtin_ia32_pabsq256_mask( (__v4di)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)-1); } __funline __m256i _mm256_mask_abs_epi64(__m256i __W, __mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_pabsq256_mask((__v4di)__A, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_abs_epi64(__mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_pabsq256_mask( (__v4di)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_abs_epi64(__m128i __A) { return (__m128i)__builtin_ia32_pabsq128_mask( (__v2di)__A, (__v2di)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm_mask_abs_epi64(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pabsq128_mask((__v2di)__A, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_abs_epi64(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pabsq128_mask( (__v2di)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm256_cvtpd_epu32(__m256d __A) { return (__m128i)__builtin_ia32_cvtpd2udq256_mask( (__v4df)__A, (__v4si)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm256_mask_cvtpd_epu32(__m128i __W, __mmask8 __U, __m256d __A) { return (__m128i)__builtin_ia32_cvtpd2udq256_mask((__v4df)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm256_maskz_cvtpd_epu32(__mmask8 __U, __m256d __A) { return (__m128i)__builtin_ia32_cvtpd2udq256_mask( (__v4df)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_cvtpd_epu32(__m128d __A) { return (__m128i)__builtin_ia32_cvtpd2udq128_mask( (__v2df)__A, (__v4si)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm_mask_cvtpd_epu32(__m128i __W, __mmask8 __U, __m128d __A) { return (__m128i)__builtin_ia32_cvtpd2udq128_mask((__v2df)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvtpd_epu32(__mmask8 __U, __m128d __A) { return (__m128i)__builtin_ia32_cvtpd2udq128_mask( (__v2df)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_cvttps_epi32(__m256i __W, __mmask8 __U, __m256 __A) { return (__m256i)__builtin_ia32_cvttps2dq256_mask((__v8sf)__A, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_cvttps_epi32(__mmask8 __U, __m256 __A) { return (__m256i)__builtin_ia32_cvttps2dq256_mask( 
(__v8sf)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_cvttps_epi32(__m128i __W, __mmask8 __U, __m128 __A) { return (__m128i)__builtin_ia32_cvttps2dq128_mask((__v4sf)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvttps_epi32(__mmask8 __U, __m128 __A) { return (__m128i)__builtin_ia32_cvttps2dq128_mask( (__v4sf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_cvttps_epu32(__m256 __A) { return (__m256i)__builtin_ia32_cvttps2udq256_mask( (__v8sf)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)-1); } __funline __m256i _mm256_mask_cvttps_epu32(__m256i __W, __mmask8 __U, __m256 __A) { return (__m256i)__builtin_ia32_cvttps2udq256_mask((__v8sf)__A, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_cvttps_epu32(__mmask8 __U, __m256 __A) { return (__m256i)__builtin_ia32_cvttps2udq256_mask( (__v8sf)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_cvttps_epu32(__m128 __A) { return (__m128i)__builtin_ia32_cvttps2udq128_mask( (__v4sf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm_mask_cvttps_epu32(__m128i __W, __mmask8 __U, __m128 __A) { return (__m128i)__builtin_ia32_cvttps2udq128_mask((__v4sf)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvttps_epu32(__mmask8 __U, __m128 __A) { return (__m128i)__builtin_ia32_cvttps2udq128_mask( (__v4sf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm256_mask_cvttpd_epi32(__m128i __W, __mmask8 __U, __m256d __A) { return (__m128i)__builtin_ia32_cvttpd2dq256_mask((__v4df)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm256_maskz_cvttpd_epi32(__mmask8 __U, __m256d __A) { return (__m128i)__builtin_ia32_cvttpd2dq256_mask( (__v4df)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_mask_cvttpd_epi32(__m128i __W, __mmask8 __U, __m128d __A) { return (__m128i)__builtin_ia32_cvttpd2dq128_mask((__v2df)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvttpd_epi32(__mmask8 __U, __m128d __A) { return (__m128i)__builtin_ia32_cvttpd2dq128_mask( (__v2df)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm256_cvttpd_epu32(__m256d __A) { return (__m128i)__builtin_ia32_cvttpd2udq256_mask( (__v4df)__A, (__v4si)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm256_mask_cvttpd_epu32(__m128i __W, __mmask8 __U, __m256d __A) { return (__m128i)__builtin_ia32_cvttpd2udq256_mask((__v4df)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm256_maskz_cvttpd_epu32(__mmask8 __U, __m256d __A) { return (__m128i)__builtin_ia32_cvttpd2udq256_mask( (__v4df)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_cvttpd_epu32(__m128d __A) { return (__m128i)__builtin_ia32_cvttpd2udq128_mask( (__v2df)__A, (__v4si)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm_mask_cvttpd_epu32(__m128i __W, __mmask8 __U, __m128d __A) { return (__m128i)__builtin_ia32_cvttpd2udq128_mask((__v2df)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvttpd_epu32(__mmask8 __U, __m128d __A) { return (__m128i)__builtin_ia32_cvttpd2udq128_mask( (__v2df)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm256_mask_cvtpd_epi32(__m128i __W, __mmask8 __U, __m256d __A) { return (__m128i)__builtin_ia32_cvtpd2dq256_mask((__v4df)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm256_maskz_cvtpd_epi32(__mmask8 __U, __m256d __A) { return (__m128i)__builtin_ia32_cvtpd2dq256_mask( (__v4df)__A, 
(__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_mask_cvtpd_epi32(__m128i __W, __mmask8 __U, __m128d __A) { return (__m128i)__builtin_ia32_cvtpd2dq128_mask((__v2df)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvtpd_epi32(__mmask8 __U, __m128d __A) { return (__m128i)__builtin_ia32_cvtpd2dq128_mask( (__v2df)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256d _mm256_mask_cvtepi32_pd(__m256d __W, __mmask8 __U, __m128i __A) { return (__m256d)__builtin_ia32_cvtdq2pd256_mask((__v4si)__A, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A) { return (__m256d)__builtin_ia32_cvtdq2pd256_mask( (__v4si)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m128d _mm_mask_cvtepi32_pd(__m128d __W, __mmask8 __U, __m128i __A) { return (__m128d)__builtin_ia32_cvtdq2pd128_mask((__v4si)__A, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A) { return (__m128d)__builtin_ia32_cvtdq2pd128_mask( (__v4si)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m256d _mm256_cvtepu32_pd(__m128i __A) { return (__m256d)__builtin_ia32_cvtudq2pd256_mask( (__v4si)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)-1); } __funline __m256d _mm256_mask_cvtepu32_pd(__m256d __W, __mmask8 __U, __m128i __A) { return (__m256d)__builtin_ia32_cvtudq2pd256_mask((__v4si)__A, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A) { return (__m256d)__builtin_ia32_cvtudq2pd256_mask( (__v4si)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m128d _mm_cvtepu32_pd(__m128i __A) { return (__m128d)__builtin_ia32_cvtudq2pd128_mask( (__v4si)__A, (__v2df)_mm_setzero_pd(), (__mmask8)-1); } __funline __m128d _mm_mask_cvtepu32_pd(__m128d __W, __mmask8 __U, __m128i __A) { return (__m128d)__builtin_ia32_cvtudq2pd128_mask((__v4si)__A, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A) { return (__m128d)__builtin_ia32_cvtudq2pd128_mask( (__v4si)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m256 _mm256_mask_cvtepi32_ps(__m256 __W, __mmask8 __U, __m256i __A) { return (__m256)__builtin_ia32_cvtdq2ps256_mask((__v8si)__A, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_cvtepi32_ps(__mmask8 __U, __m256i __A) { return (__m256)__builtin_ia32_cvtdq2ps256_mask( (__v8si)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_mask_cvtepi32_ps(__m128 __W, __mmask8 __U, __m128i __A) { return (__m128)__builtin_ia32_cvtdq2ps128_mask((__v4si)__A, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_cvtepi32_ps(__mmask8 __U, __m128i __A) { return (__m128)__builtin_ia32_cvtdq2ps128_mask( (__v4si)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m256 _mm256_cvtepu32_ps(__m256i __A) { return (__m256)__builtin_ia32_cvtudq2ps256_mask( (__v8si)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)-1); } __funline __m256 _mm256_mask_cvtepu32_ps(__m256 __W, __mmask8 __U, __m256i __A) { return (__m256)__builtin_ia32_cvtudq2ps256_mask((__v8si)__A, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_cvtepu32_ps(__mmask8 __U, __m256i __A) { return (__m256)__builtin_ia32_cvtudq2ps256_mask( (__v8si)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_cvtepu32_ps(__m128i __A) { return (__m128)__builtin_ia32_cvtudq2ps128_mask( (__v4si)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)-1); } __funline __m128 _mm_mask_cvtepu32_ps(__m128 __W, __mmask8 __U, 
__m128i __A) { return (__m128)__builtin_ia32_cvtudq2ps128_mask((__v4si)__A, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_cvtepu32_ps(__mmask8 __U, __m128i __A) { return (__m128)__builtin_ia32_cvtudq2ps128_mask( (__v4si)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m256d _mm256_mask_cvtps_pd(__m256d __W, __mmask8 __U, __m128 __A) { return (__m256d)__builtin_ia32_cvtps2pd256_mask((__v4sf)__A, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_cvtps_pd(__mmask8 __U, __m128 __A) { return (__m256d)__builtin_ia32_cvtps2pd256_mask( (__v4sf)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m128d _mm_mask_cvtps_pd(__m128d __W, __mmask8 __U, __m128 __A) { return (__m128d)__builtin_ia32_cvtps2pd128_mask((__v4sf)__A, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_cvtps_pd(__mmask8 __U, __m128 __A) { return (__m128d)__builtin_ia32_cvtps2pd128_mask( (__v4sf)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m128i _mm_cvtepi32_epi8(__m128i __A) { return (__m128i)__builtin_ia32_pmovdb128_mask( (__v4si)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtepi32_storeu_epi8(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovdb128mem_mask((__v16qi *)__P, (__v4si)__A, __M); } __funline __m128i _mm_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovdb128_mask((__v4si)__A, (__v16qi)__O, __M); } __funline __m128i _mm_maskz_cvtepi32_epi8(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovdb128_mask( (__v4si)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtepi32_epi8(__m256i __A) { return (__m128i)__builtin_ia32_pmovdb256_mask( (__v8si)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline __m128i _mm256_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovdb256_mask((__v8si)__A, (__v16qi)__O, __M); } __funline void _mm256_mask_cvtepi32_storeu_epi8(void *__P, __mmask8 __M, __m256i __A) { __builtin_ia32_pmovdb256mem_mask((__v16qi *)__P, (__v8si)__A, __M); } __funline __m128i _mm256_maskz_cvtepi32_epi8(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovdb256_mask( (__v8si)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm_cvtsepi32_epi8(__m128i __A) { return (__m128i)__builtin_ia32_pmovsdb128_mask( (__v4si)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtsepi32_storeu_epi8(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovsdb128mem_mask((__v16qi *)__P, (__v4si)__A, __M); } __funline __m128i _mm_mask_cvtsepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovsdb128_mask((__v4si)__A, (__v16qi)__O, __M); } __funline __m128i _mm_maskz_cvtsepi32_epi8(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovsdb128_mask( (__v4si)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtsepi32_epi8(__m256i __A) { return (__m128i)__builtin_ia32_pmovsdb256_mask( (__v8si)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm256_mask_cvtsepi32_storeu_epi8(void *__P, __mmask8 __M, __m256i __A) { __builtin_ia32_pmovsdb256mem_mask((__v16qi *)__P, (__v8si)__A, __M); } __funline __m128i _mm256_mask_cvtsepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovsdb256_mask((__v8si)__A, (__v16qi)__O, __M); } __funline __m128i _mm256_maskz_cvtsepi32_epi8(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovsdb256_mask( 
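/* Narrowing integer conversions come in three flavors: vpmovdb truncates,
   vpmovsdb saturates as signed, and vpmovusdb saturates as unsigned. The
   same triple repeats below for every source/destination width pair. */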
(__v8si)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm_cvtusepi32_epi8(__m128i __A) { return (__m128i)__builtin_ia32_pmovusdb128_mask( (__v4si)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtusepi32_storeu_epi8(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovusdb128mem_mask((__v16qi *)__P, (__v4si)__A, __M); } __funline __m128i _mm_mask_cvtusepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovusdb128_mask((__v4si)__A, (__v16qi)__O, __M); } __funline __m128i _mm_maskz_cvtusepi32_epi8(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovusdb128_mask( (__v4si)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtusepi32_epi8(__m256i __A) { return (__m128i)__builtin_ia32_pmovusdb256_mask( (__v8si)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm256_mask_cvtusepi32_storeu_epi8(void *__P, __mmask8 __M, __m256i __A) { __builtin_ia32_pmovusdb256mem_mask((__v16qi *)__P, (__v8si)__A, __M); } __funline __m128i _mm256_mask_cvtusepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovusdb256_mask((__v8si)__A, (__v16qi)__O, __M); } __funline __m128i _mm256_maskz_cvtusepi32_epi8(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovusdb256_mask( (__v8si)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm_cvtepi32_epi16(__m128i __A) { return (__m128i)__builtin_ia32_pmovdw128_mask( (__v4si)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtepi32_storeu_epi16(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovdw128mem_mask((__v8hi *)__P, (__v4si)__A, __M); } __funline __m128i _mm_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovdw128_mask((__v4si)__A, (__v8hi)__O, __M); } __funline __m128i _mm_maskz_cvtepi32_epi16(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovdw128_mask( (__v4si)__A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtepi32_epi16(__m256i __A) { return (__m128i)__builtin_ia32_pmovdw256_mask( (__v8si)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)-1); } __funline void _mm256_mask_cvtepi32_storeu_epi16(void *__P, __mmask8 __M, __m256i __A) { __builtin_ia32_pmovdw256mem_mask((__v8hi *)__P, (__v8si)__A, __M); } __funline __m128i _mm256_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovdw256_mask((__v8si)__A, (__v8hi)__O, __M); } __funline __m128i _mm256_maskz_cvtepi32_epi16(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovdw256_mask( (__v8si)__A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm_cvtsepi32_epi16(__m128i __A) { return (__m128i)__builtin_ia32_pmovsdw128_mask( (__v4si)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtsepi32_storeu_epi16(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovsdw128mem_mask((__v8hi *)__P, (__v4si)__A, __M); } __funline __m128i _mm_mask_cvtsepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovsdw128_mask((__v4si)__A, (__v8hi)__O, __M); } __funline __m128i _mm_maskz_cvtsepi32_epi16(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovsdw128_mask( (__v4si)__A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtsepi32_epi16(__m256i __A) { return (__m128i)__builtin_ia32_pmovsdw256_mask( (__v8si)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1); } __funline void 
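/* The *_storeu_* variants narrow straight to memory, writing only the
   element lanes selected by __M. */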
_mm256_mask_cvtsepi32_storeu_epi16(void *__P, __mmask8 __M, __m256i __A) { __builtin_ia32_pmovsdw256mem_mask((__v8hi *)__P, (__v8si)__A, __M); } __funline __m128i _mm256_mask_cvtsepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovsdw256_mask((__v8si)__A, (__v8hi)__O, __M); } __funline __m128i _mm256_maskz_cvtsepi32_epi16(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovsdw256_mask( (__v8si)__A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm_cvtusepi32_epi16(__m128i __A) { return (__m128i)__builtin_ia32_pmovusdw128_mask( (__v4si)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtusepi32_storeu_epi16(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovusdw128mem_mask((__v8hi *)__P, (__v4si)__A, __M); } __funline __m128i _mm_mask_cvtusepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovusdw128_mask((__v4si)__A, (__v8hi)__O, __M); } __funline __m128i _mm_maskz_cvtusepi32_epi16(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovusdw128_mask( (__v4si)__A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtusepi32_epi16(__m256i __A) { return (__m128i)__builtin_ia32_pmovusdw256_mask( (__v8si)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm256_mask_cvtusepi32_storeu_epi16(void *__P, __mmask8 __M, __m256i __A) { __builtin_ia32_pmovusdw256mem_mask((__v8hi *)__P, (__v8si)__A, __M); } __funline __m128i _mm256_mask_cvtusepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovusdw256_mask((__v8si)__A, (__v8hi)__O, __M); } __funline __m128i _mm256_maskz_cvtusepi32_epi16(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovusdw256_mask( (__v8si)__A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm_cvtepi64_epi8(__m128i __A) { return (__m128i)__builtin_ia32_pmovqb128_mask( (__v2di)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtepi64_storeu_epi8(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovqb128mem_mask((__v16qi *)__P, (__v2di)__A, __M); } __funline __m128i _mm_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovqb128_mask((__v2di)__A, (__v16qi)__O, __M); } __funline __m128i _mm_maskz_cvtepi64_epi8(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovqb128_mask( (__v2di)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtepi64_epi8(__m256i __A) { return (__m128i)__builtin_ia32_pmovqb256_mask( (__v4di)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm256_mask_cvtepi64_storeu_epi8(void *__P, __mmask8 __M, __m256i __A) { __builtin_ia32_pmovqb256mem_mask((__v16qi *)__P, (__v4di)__A, __M); } __funline __m128i _mm256_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovqb256_mask((__v4di)__A, (__v16qi)__O, __M); } __funline __m128i _mm256_maskz_cvtepi64_epi8(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovqb256_mask( (__v4di)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm_cvtsepi64_epi8(__m128i __A) { return (__m128i)__builtin_ia32_pmovsqb128_mask( (__v2di)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtsepi64_storeu_epi8(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovsqb128mem_mask((__v16qi *)__P, (__v2di)__A, __M); } __funline __m128i _mm_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A) { return 
(__m128i)__builtin_ia32_pmovsqb128_mask((__v2di)__A, (__v16qi)__O, __M); } __funline __m128i _mm_maskz_cvtsepi64_epi8(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovsqb128_mask( (__v2di)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtsepi64_epi8(__m256i __A) { return (__m128i)__builtin_ia32_pmovsqb256_mask( (__v4di)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm256_mask_cvtsepi64_storeu_epi8(void *__P, __mmask8 __M, __m256i __A) { __builtin_ia32_pmovsqb256mem_mask((__v16qi *)__P, (__v4di)__A, __M); } __funline __m128i _mm256_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovsqb256_mask((__v4di)__A, (__v16qi)__O, __M); } __funline __m128i _mm256_maskz_cvtsepi64_epi8(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovsqb256_mask( (__v4di)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm_cvtusepi64_epi8(__m128i __A) { return (__m128i)__builtin_ia32_pmovusqb128_mask( (__v2di)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtusepi64_storeu_epi8(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovusqb128mem_mask((__v16qi *)__P, (__v2di)__A, __M); } __funline __m128i _mm_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovusqb128_mask((__v2di)__A, (__v16qi)__O, __M); } __funline __m128i _mm_maskz_cvtusepi64_epi8(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovusqb128_mask( (__v2di)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtusepi64_epi8(__m256i __A) { return (__m128i)__builtin_ia32_pmovusqb256_mask( (__v4di)__A, (__v16qi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm256_mask_cvtusepi64_storeu_epi8(void *__P, __mmask8 __M, __m256i __A) { __builtin_ia32_pmovusqb256mem_mask((__v16qi *)__P, (__v4di)__A, __M); } __funline __m128i _mm256_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovusqb256_mask((__v4di)__A, (__v16qi)__O, __M); } __funline __m128i _mm256_maskz_cvtusepi64_epi8(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovusqb256_mask( (__v4di)__A, (__v16qi)_mm_setzero_si128(), __M); } __funline __m128i _mm_cvtepi64_epi16(__m128i __A) { return (__m128i)__builtin_ia32_pmovqw128_mask( (__v2di)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtepi64_storeu_epi16(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovqw128mem_mask((__v8hi *)__P, (__v2di)__A, __M); } __funline __m128i _mm_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovqw128_mask((__v2di)__A, (__v8hi)__O, __M); } __funline __m128i _mm_maskz_cvtepi64_epi16(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovqw128_mask( (__v2di)__A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtepi64_epi16(__m256i __A) { return (__m128i)__builtin_ia32_pmovqw256_mask( (__v4di)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm256_mask_cvtepi64_storeu_epi16(void *__P, __mmask8 __M, __m256i __A) { __builtin_ia32_pmovqw256mem_mask((__v8hi *)__P, (__v4di)__A, __M); } __funline __m128i _mm256_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovqw256_mask((__v4di)__A, (__v8hi)__O, __M); } __funline __m128i _mm256_maskz_cvtepi64_epi16(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovqw256_mask( (__v4di)__A, (__v8hi)_mm_setzero_si128(), 
__M); } __funline __m128i _mm_cvtsepi64_epi16(__m128i __A) { return (__m128i)__builtin_ia32_pmovsqw128_mask( (__v2di)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtsepi64_storeu_epi16(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovsqw128mem_mask((__v8hi *)__P, (__v2di)__A, __M); } __funline __m128i _mm_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovsqw128_mask((__v2di)__A, (__v8hi)__O, __M); } __funline __m128i _mm_maskz_cvtsepi64_epi16(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovsqw128_mask( (__v2di)__A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtsepi64_epi16(__m256i __A) { return (__m128i)__builtin_ia32_pmovsqw256_mask( (__v4di)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm256_mask_cvtsepi64_storeu_epi16(void *__P, __mmask8 __M, __m256i __A) { __builtin_ia32_pmovsqw256mem_mask((__v8hi *)__P, (__v4di)__A, __M); } __funline __m128i _mm256_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovsqw256_mask((__v4di)__A, (__v8hi)__O, __M); } __funline __m128i _mm256_maskz_cvtsepi64_epi16(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovsqw256_mask( (__v4di)__A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm_cvtusepi64_epi16(__m128i __A) { return (__m128i)__builtin_ia32_pmovusqw128_mask( (__v2di)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtusepi64_storeu_epi16(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovusqw128mem_mask((__v8hi *)__P, (__v2di)__A, __M); } __funline __m128i _mm_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovusqw128_mask((__v2di)__A, (__v8hi)__O, __M); } __funline __m128i _mm_maskz_cvtusepi64_epi16(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovusqw128_mask( (__v2di)__A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtusepi64_epi16(__m256i __A) { return (__m128i)__builtin_ia32_pmovusqw256_mask( (__v4di)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm256_mask_cvtusepi64_storeu_epi16(void *__P, __mmask8 __M, __m256i __A) { __builtin_ia32_pmovusqw256mem_mask((__v8hi *)__P, (__v4di)__A, __M); } __funline __m128i _mm256_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovusqw256_mask((__v4di)__A, (__v8hi)__O, __M); } __funline __m128i _mm256_maskz_cvtusepi64_epi16(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovusqw256_mask( (__v4di)__A, (__v8hi)_mm_setzero_si128(), __M); } __funline __m128i _mm_cvtepi64_epi32(__m128i __A) { return (__m128i)__builtin_ia32_pmovqd128_mask( (__v2di)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtepi64_storeu_epi32(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovqd128mem_mask((__v4si *)__P, (__v2di)__A, __M); } __funline __m128i _mm_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovqd128_mask((__v2di)__A, (__v4si)__O, __M); } __funline __m128i _mm_maskz_cvtepi64_epi32(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovqd128_mask( (__v2di)__A, (__v4si)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtepi64_epi32(__m256i __A) { return (__m128i)__builtin_ia32_pmovqd256_mask( (__v4di)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm256_mask_cvtepi64_storeu_epi32(void *__P, __mmask8 
__M, __m256i __A) { __builtin_ia32_pmovqd256mem_mask((__v4si *)__P, (__v4di)__A, __M); } __funline __m128i _mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovqd256_mask((__v4di)__A, (__v4si)__O, __M); } __funline __m128i _mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovqd256_mask( (__v4di)__A, (__v4si)_mm_setzero_si128(), __M); } __funline __m128i _mm_cvtsepi64_epi32(__m128i __A) { return (__m128i)__builtin_ia32_pmovsqd128_mask( (__v2di)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtsepi64_storeu_epi32(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovsqd128mem_mask((__v4si *)__P, (__v2di)__A, __M); } __funline __m128i _mm_mask_cvtsepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovsqd128_mask((__v2di)__A, (__v4si)__O, __M); } __funline __m128i _mm_maskz_cvtsepi64_epi32(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovsqd128_mask( (__v2di)__A, (__v4si)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtsepi64_epi32(__m256i __A) { return (__m128i)__builtin_ia32_pmovsqd256_mask( (__v4di)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm256_mask_cvtsepi64_storeu_epi32(void *__P, __mmask8 __M, __m256i __A) { __builtin_ia32_pmovsqd256mem_mask((__v4si *)__P, (__v4di)__A, __M); } __funline __m128i _mm256_mask_cvtsepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovsqd256_mask((__v4di)__A, (__v4si)__O, __M); } __funline __m128i _mm256_maskz_cvtsepi64_epi32(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovsqd256_mask( (__v4di)__A, (__v4si)_mm_setzero_si128(), __M); } __funline __m128i _mm_cvtusepi64_epi32(__m128i __A) { return (__m128i)__builtin_ia32_pmovusqd128_mask( (__v2di)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm_mask_cvtusepi64_storeu_epi32(void *__P, __mmask8 __M, __m128i __A) { __builtin_ia32_pmovusqd128mem_mask((__v4si *)__P, (__v2di)__A, __M); } __funline __m128i _mm_mask_cvtusepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovusqd128_mask((__v2di)__A, (__v4si)__O, __M); } __funline __m128i _mm_maskz_cvtusepi64_epi32(__mmask8 __M, __m128i __A) { return (__m128i)__builtin_ia32_pmovusqd128_mask( (__v2di)__A, (__v4si)_mm_setzero_si128(), __M); } __funline __m128i _mm256_cvtusepi64_epi32(__m256i __A) { return (__m128i)__builtin_ia32_pmovusqd256_mask( (__v4di)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1); } __funline void _mm256_mask_cvtusepi64_storeu_epi32(void *__P, __mmask8 __M, __m256i __A) { __builtin_ia32_pmovusqd256mem_mask((__v4si *)__P, (__v4di)__A, __M); } __funline __m128i _mm256_mask_cvtusepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovusqd256_mask((__v4di)__A, (__v4si)__O, __M); } __funline __m128i _mm256_maskz_cvtusepi64_epi32(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_pmovusqd256_mask( (__v4di)__A, (__v4si)_mm_setzero_si128(), __M); } __funline __m256 _mm256_mask_broadcastss_ps(__m256 __O, __mmask8 __M, __m128 __A) { return (__m256)__builtin_ia32_broadcastss256_mask((__v4sf)__A, (__v8sf)__O, __M); } __funline __m256 _mm256_maskz_broadcastss_ps(__mmask8 __M, __m128 __A) { return (__m256)__builtin_ia32_broadcastss256_mask( (__v4sf)__A, (__v8sf)_mm256_setzero_ps(), __M); } __funline __m128 _mm_mask_broadcastss_ps(__m128 __O, __mmask8 __M, __m128 __A) { return 
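/* Masked broadcasts: splat the low element of __A across the destination,
   merging with __O (or zero) under __M; the set1 forms broadcast from a
   scalar instead of a vector. */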
(__m128)__builtin_ia32_broadcastss128_mask((__v4sf)__A, (__v4sf)__O, __M);
}
__funline __m128 _mm_maskz_broadcastss_ps(__mmask8 __M, __m128 __A) {
  return (__m128)__builtin_ia32_broadcastss128_mask(
      (__v4sf)__A, (__v4sf)_mm_setzero_ps(), __M);
}
__funline __m256d _mm256_mask_broadcastsd_pd(__m256d __O, __mmask8 __M,
                                             __m128d __A) {
  return (__m256d)__builtin_ia32_broadcastsd256_mask((__v2df)__A, (__v4df)__O,
                                                     __M);
}
__funline __m256d _mm256_maskz_broadcastsd_pd(__mmask8 __M, __m128d __A) {
  return (__m256d)__builtin_ia32_broadcastsd256_mask(
      (__v2df)__A, (__v4df)_mm256_setzero_pd(), __M);
}
__funline __m256i _mm256_mask_broadcastd_epi32(__m256i __O, __mmask8 __M,
                                               __m128i __A) {
  return (__m256i)__builtin_ia32_pbroadcastd256_mask((__v4si)__A, (__v8si)__O,
                                                     __M);
}
__funline __m256i _mm256_maskz_broadcastd_epi32(__mmask8 __M, __m128i __A) {
  return (__m256i)__builtin_ia32_pbroadcastd256_mask(
      (__v4si)__A, (__v8si)_mm256_setzero_si256(), __M);
}
__funline __m256i _mm256_mask_set1_epi32(__m256i __O, __mmask8 __M, int __A) {
  return (__m256i)__builtin_ia32_pbroadcastd256_gpr_mask(__A, (__v8si)__O,
                                                         __M);
}
__funline __m256i _mm256_maskz_set1_epi32(__mmask8 __M, int __A) {
  return (__m256i)__builtin_ia32_pbroadcastd256_gpr_mask(
      __A, (__v8si)_mm256_setzero_si256(), __M);
}
__funline __m128i _mm_mask_broadcastd_epi32(__m128i __O, __mmask8 __M,
                                            __m128i __A) {
  return (__m128i)__builtin_ia32_pbroadcastd128_mask((__v4si)__A, (__v4si)__O,
                                                     __M);
}
__funline __m128i _mm_maskz_broadcastd_epi32(__mmask8 __M, __m128i __A) {
  return (__m128i)__builtin_ia32_pbroadcastd128_mask(
      (__v4si)__A, (__v4si)_mm_setzero_si128(), __M);
}
__funline __m128i _mm_mask_set1_epi32(__m128i __O, __mmask8 __M, int __A) {
  return (__m128i)__builtin_ia32_pbroadcastd128_gpr_mask(__A, (__v4si)__O,
                                                         __M);
}
__funline __m128i _mm_maskz_set1_epi32(__mmask8 __M, int __A) {
  return (__m128i)__builtin_ia32_pbroadcastd128_gpr_mask(
      __A, (__v4si)_mm_setzero_si128(), __M);
}
__funline __m256i _mm256_mask_broadcastq_epi64(__m256i __O, __mmask8 __M,
                                               __m128i __A) {
  return (__m256i)__builtin_ia32_pbroadcastq256_mask((__v2di)__A, (__v4di)__O,
                                                     __M);
}
__funline __m256i _mm256_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A) {
  return (__m256i)__builtin_ia32_pbroadcastq256_mask(
      (__v2di)__A, (__v4di)_mm256_setzero_si256(), __M);
}
__funline __m256i _mm256_mask_set1_epi64(__m256i __O, __mmask8 __M,
                                         long long __A) {
  return (__m256i)__builtin_ia32_pbroadcastq256_gpr_mask(__A, (__v4di)__O,
                                                         __M);
}
__funline __m256i _mm256_maskz_set1_epi64(__mmask8 __M, long long __A) {
  return (__m256i)__builtin_ia32_pbroadcastq256_gpr_mask(
      __A, (__v4di)_mm256_setzero_si256(), __M);
}
__funline __m128i _mm_mask_broadcastq_epi64(__m128i __O, __mmask8 __M,
                                            __m128i __A) {
  return (__m128i)__builtin_ia32_pbroadcastq128_mask((__v2di)__A, (__v2di)__O,
                                                     __M);
}
__funline __m128i _mm_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A) {
  return (__m128i)__builtin_ia32_pbroadcastq128_mask(
      (__v2di)__A, (__v2di)_mm_setzero_si128(), __M);
}
__funline __m128i _mm_mask_set1_epi64(__m128i __O, __mmask8 __M,
                                      long long __A) {
  return (__m128i)__builtin_ia32_pbroadcastq128_gpr_mask(__A, (__v2di)__O,
                                                         __M);
}
__funline __m128i _mm_maskz_set1_epi64(__mmask8 __M, long long __A) {
  return (__m128i)__builtin_ia32_pbroadcastq128_gpr_mask(
      __A, (__v2di)_mm_setzero_si128(), __M);
}
__funline __m256 _mm256_broadcast_f32x4(__m128 __A) {
  /* n.b. upstream GCC passes (__v8sf)_mm256_undefined_pd() here; the value
     is undefined either way, so the type-matched ps form is used. */
  return (__m256)__builtin_ia32_broadcastf32x4_256_mask(
      (__v4sf)__A, (__v8sf)_mm256_undefined_ps(), (__mmask8)-1);
}
__funline __m256 _mm256_mask_broadcast_f32x4(__m256 __O, __mmask8
__M, __m128 __A) { return (__m256)__builtin_ia32_broadcastf32x4_256_mask((__v4sf)__A, (__v8sf)__O, __M); } __funline __m256 _mm256_maskz_broadcast_f32x4(__mmask8 __M, __m128 __A) { return (__m256)__builtin_ia32_broadcastf32x4_256_mask( (__v4sf)__A, (__v8sf)_mm256_setzero_ps(), __M); } __funline __m256i _mm256_broadcast_i32x4(__m128i __A) { return (__m256i)__builtin_ia32_broadcasti32x4_256_mask( (__v4si)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1); } __funline __m256i _mm256_mask_broadcast_i32x4(__m256i __O, __mmask8 __M, __m128i __A) { return (__m256i)__builtin_ia32_broadcasti32x4_256_mask((__v4si)__A, (__v8si)__O, __M); } __funline __m256i _mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i __A) { return (__m256i)__builtin_ia32_broadcasti32x4_256_mask( (__v4si)__A, (__v8si)_mm256_setzero_si256(), __M); } __funline __m256i _mm256_mask_cvtepi8_epi32(__m256i __W, __mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovsxbd256_mask((__v16qi)__A, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovsxbd256_mask( (__v16qi)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_cvtepi8_epi32(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovsxbd128_mask((__v16qi)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovsxbd128_mask( (__v16qi)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_cvtepi8_epi64(__m256i __W, __mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovsxbq256_mask((__v16qi)__A, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovsxbq256_mask( (__v16qi)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_cvtepi8_epi64(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovsxbq128_mask((__v16qi)__A, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovsxbq128_mask( (__v16qi)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_cvtepi16_epi32(__m256i __W, __mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovsxwd256_mask((__v8hi)__A, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovsxwd256_mask( (__v8hi)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_cvtepi16_epi32(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovsxwd128_mask((__v8hi)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovsxwd128_mask( (__v8hi)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_cvtepi16_epi64(__m256i __W, __mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovsxwq256_mask((__v8hi)__A, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovsxwq256_mask( (__v8hi)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_cvtepi16_epi64(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovsxwq128_mask((__v8hi)__A, (__v2di)__W, (__mmask8)__U); } __funline __m128i 
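/* Masked widening conversions: the vpmovsx* forms sign-extend the low
   elements of the source, while the vpmovzx* forms further below
   zero-extend them. */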
_mm_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovsxwq128_mask( (__v8hi)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_cvtepi32_epi64(__m256i __W, __mmask8 __U, __m128i __X) { return (__m256i)__builtin_ia32_pmovsxdq256_mask((__v4si)__X, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X) { return (__m256i)__builtin_ia32_pmovsxdq256_mask( (__v4si)__X, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_cvtepi32_epi64(__m128i __W, __mmask8 __U, __m128i __X) { return (__m128i)__builtin_ia32_pmovsxdq128_mask((__v4si)__X, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X) { return (__m128i)__builtin_ia32_pmovsxdq128_mask( (__v4si)__X, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_cvtepu8_epi32(__m256i __W, __mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovzxbd256_mask((__v16qi)__A, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovzxbd256_mask( (__v16qi)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_cvtepu8_epi32(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovzxbd128_mask((__v16qi)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovzxbd128_mask( (__v16qi)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_cvtepu8_epi64(__m256i __W, __mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovzxbq256_mask((__v16qi)__A, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovzxbq256_mask( (__v16qi)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_cvtepu8_epi64(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovzxbq128_mask((__v16qi)__A, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovzxbq128_mask( (__v16qi)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_cvtepu16_epi32(__m256i __W, __mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovzxwd256_mask((__v8hi)__A, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovzxwd256_mask( (__v8hi)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_cvtepu16_epi32(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovzxwd128_mask((__v8hi)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovzxwd128_mask( (__v8hi)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_cvtepu16_epi64(__m256i __W, __mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovzxwq256_mask((__v8hi)__A, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) { return (__m256i)__builtin_ia32_pmovzxwq256_mask( (__v8hi)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_cvtepu16_epi64(__m128i __W, __mmask8 __U, __m128i __A) { return 
(__m128i)__builtin_ia32_pmovzxwq128_mask((__v8hi)__A, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_pmovzxwq128_mask( (__v8hi)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_cvtepu32_epi64(__m256i __W, __mmask8 __U, __m128i __X) { return (__m256i)__builtin_ia32_pmovzxdq256_mask((__v4si)__X, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X) { return (__m256i)__builtin_ia32_pmovzxdq256_mask( (__v4si)__X, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_cvtepu32_epi64(__m128i __W, __mmask8 __U, __m128i __X) { return (__m128i)__builtin_ia32_pmovzxdq128_mask((__v4si)__X, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X) { return (__m128i)__builtin_ia32_pmovzxdq128_mask( (__v4si)__X, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256d _mm256_rcp14_pd(__m256d __A) { return (__m256d)__builtin_ia32_rcp14pd256_mask( (__v4df)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)-1); } __funline __m256d _mm256_mask_rcp14_pd(__m256d __W, __mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_rcp14pd256_mask((__v4df)__A, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_rcp14_pd(__mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_rcp14pd256_mask( (__v4df)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m128d _mm_rcp14_pd(__m128d __A) { return (__m128d)__builtin_ia32_rcp14pd128_mask( (__v2df)__A, (__v2df)_mm_setzero_pd(), (__mmask8)-1); } __funline __m128d _mm_mask_rcp14_pd(__m128d __W, __mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_rcp14pd128_mask((__v2df)__A, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_rcp14_pd(__mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_rcp14pd128_mask( (__v2df)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m256 _mm256_rcp14_ps(__m256 __A) { return (__m256)__builtin_ia32_rcp14ps256_mask( (__v8sf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)-1); } __funline __m256 _mm256_mask_rcp14_ps(__m256 __W, __mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_rcp14ps256_mask((__v8sf)__A, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_rcp14_ps(__mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_rcp14ps256_mask( (__v8sf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_rcp14_ps(__m128 __A) { return (__m128)__builtin_ia32_rcp14ps128_mask( (__v4sf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)-1); } __funline __m128 _mm_mask_rcp14_ps(__m128 __W, __mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_rcp14ps128_mask((__v4sf)__A, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_rcp14_ps(__mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_rcp14ps128_mask( (__v4sf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m256d _mm256_rsqrt14_pd(__m256d __A) { return (__m256d)__builtin_ia32_rsqrt14pd256_mask( (__v4df)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)-1); } __funline __m256d _mm256_mask_rsqrt14_pd(__m256d __W, __mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_rsqrt14pd256_mask((__v4df)__A, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_rsqrt14_pd(__mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_rsqrt14pd256_mask( (__v4df)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m128d _mm_rsqrt14_pd(__m128d __A) { return 
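/* vrcp14 and vrsqrt14 yield approximations of 1/x and 1/sqrt(x) with a
   maximum relative error of 2^-14. */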
(__m128d)__builtin_ia32_rsqrt14pd128_mask( (__v2df)__A, (__v2df)_mm_setzero_pd(), (__mmask8)-1); } __funline __m128d _mm_mask_rsqrt14_pd(__m128d __W, __mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_rsqrt14pd128_mask((__v2df)__A, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_rsqrt14_pd(__mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_rsqrt14pd128_mask( (__v2df)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m256 _mm256_rsqrt14_ps(__m256 __A) { return (__m256)__builtin_ia32_rsqrt14ps256_mask( (__v8sf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)-1); } __funline __m256 _mm256_mask_rsqrt14_ps(__m256 __W, __mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_rsqrt14ps256_mask((__v8sf)__A, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_rsqrt14_ps(__mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_rsqrt14ps256_mask( (__v8sf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_rsqrt14_ps(__m128 __A) { return (__m128)__builtin_ia32_rsqrt14ps128_mask( (__v4sf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)-1); } __funline __m128 _mm_mask_rsqrt14_ps(__m128 __W, __mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_rsqrt14ps128_mask((__v4sf)__A, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_rsqrt14_ps(__mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_rsqrt14ps128_mask( (__v4sf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m256d _mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_sqrtpd256_mask((__v4df)__A, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_sqrtpd256_mask( (__v4df)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m128d _mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_sqrtpd128_mask((__v2df)__A, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_sqrtpd128_mask( (__v2df)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m256 _mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_sqrtps256_mask((__v8sf)__A, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_sqrtps256_mask( (__v8sf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_sqrtps128_mask((__v4sf)__A, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_sqrtps128_mask( (__v4sf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m256i _mm256_mask_add_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddd256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_add_epi32(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddd256_mask( (__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m256i _mm256_mask_add_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddq256_mask((__v4di)__A, (__v4di)__B, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_add_epi64(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_paddq256_mask( (__v4di)__A, (__v4di)__B, 
(__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m256i _mm256_mask_sub_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubd256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_sub_epi32(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubd256_mask( (__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m256i _mm256_mask_sub_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubq256_mask((__v4di)__A, (__v4di)__B, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_sub_epi64(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_psubq256_mask( (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_add_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddd128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_add_epi32(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddd128_mask( (__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_mask_add_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_add_epi64(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_paddq128_mask( (__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_mask_sub_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psubd128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_sub_epi32(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psubd128_mask( (__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_mask_sub_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psubq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_sub_epi64(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psubq128_mask( (__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256 _mm256_getexp_ps(__m256 __A) { return (__m256)__builtin_ia32_getexpps256_mask( (__v8sf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)-1); } __funline __m256 _mm256_mask_getexp_ps(__m256 __W, __mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_getexpps256_mask((__v8sf)__A, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_getexp_ps(__mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_getexpps256_mask( (__v8sf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m256d _mm256_getexp_pd(__m256d __A) { return (__m256d)__builtin_ia32_getexppd256_mask( (__v4df)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)-1); } __funline __m256d _mm256_mask_getexp_pd(__m256d __W, __mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_getexppd256_mask((__v4df)__A, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_getexp_pd(__mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_getexppd256_mask( (__v4df)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m128 _mm_getexp_ps(__m128 __A) { return (__m128)__builtin_ia32_getexpps128_mask( (__v4sf)__A, 
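/* vgetexp extracts the exponent: each result lane is floor(log2(|x|))
   represented as a floating-point value. */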
(__v4sf)_mm_setzero_ps(), (__mmask8)-1); } __funline __m128 _mm_mask_getexp_ps(__m128 __W, __mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_getexpps128_mask((__v4sf)__A, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_getexp_ps(__mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_getexpps128_mask( (__v4sf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m128d _mm_getexp_pd(__m128d __A) { return (__m128d)__builtin_ia32_getexppd128_mask( (__v2df)__A, (__v2df)_mm_setzero_pd(), (__mmask8)-1); } __funline __m128d _mm_mask_getexp_pd(__m128d __W, __mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_getexppd128_mask((__v2df)__A, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_getexp_pd(__mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_getexppd128_mask( (__v2df)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m256i _mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_psrld256_mask((__v8si)__A, (__v4si)__B, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_psrld256_mask( (__v8si)__A, (__v4si)__B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psrld128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psrld128_mask( (__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_psrlq256_mask((__v4di)__A, (__v2di)__B, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B) { return (__m256i)__builtin_ia32_psrlq256_mask( (__v4di)__A, (__v2di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psrlq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_psrlq128_mask( (__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pandd256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pandd256_mask( (__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m256d _mm256_scalef_pd(__m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_scalefpd256_mask( (__v4df)__A, (__v4df)__B, (__v4df)_mm256_setzero_pd(), (__mmask8)-1); } __funline __m256d _mm256_mask_scalef_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_scalefpd256_mask((__v4df)__A, (__v4df)__B, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_scalef_pd(__mmask8 __U, __m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_scalefpd256_mask( (__v4df)__A, (__v4df)__B, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m256 _mm256_scalef_ps(__m256 __A, __m256 __B) { return 
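/* vscalef computes __A * 2^floor(__B) per lane, i.e. a vectorized ldexp
   whose exponent operand is itself floating point. */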
(__m256)__builtin_ia32_scalefps256_mask( (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ps(), (__mmask8)-1); } __funline __m256 _mm256_mask_scalef_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { return (__m256)__builtin_ia32_scalefps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_scalef_ps(__mmask8 __U, __m256 __A, __m256 __B) { return (__m256)__builtin_ia32_scalefps256_mask( (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128d _mm_scalef_pd(__m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_scalefpd128_mask( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)-1); } __funline __m128d _mm_mask_scalef_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_scalefpd128_mask((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_scalef_pd(__mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_scalefpd128_mask( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m128 _mm_scalef_ps(__m128 __A, __m128 __B) { return (__m128)__builtin_ia32_scalefps128_mask( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)-1); } __funline __m128 _mm_mask_scalef_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_scalefps128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_scalef_ps(__mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_scalefps128_mask( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m256d _mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmaddpd256_mask((__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m256d _mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) { return (__m256d)__builtin_ia32_vfmaddpd256_mask3((__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m256d _mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmaddpd256_maskz((__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m128d _mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddpd128_mask((__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m128d _mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) { return (__m128d)__builtin_ia32_vfmaddpd128_mask3((__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m128d _mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddpd128_maskz((__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m256 _mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmaddps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m256 _mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) { return (__m256)__builtin_ia32_vfmaddps256_mask3((__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m256 _mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmaddps256_maskz((__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m128 _mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) { return 
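/* Masked FMA naming convention: _mask merges with the first operand __A,
   _mask3 merges with the addend __C, and _maskz zeroes unselected lanes.
   For example, lane i of _mm_mask_fmadd_ps(a, m, b, c) is
   ((m >> i) & 1) ? a[i] * b[i] + c[i] : a[i]. */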
(__m128)__builtin_ia32_vfmaddps128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m128 _mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) { return (__m128)__builtin_ia32_vfmaddps128_mask3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m128 _mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddps128_maskz((__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m256d _mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmsubpd256_mask((__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m256d _mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) { return (__m256d)__builtin_ia32_vfmsubpd256_mask3((__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m256d _mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmsubpd256_maskz((__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m128d _mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmsubpd128_mask((__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m128d _mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) { return (__m128d)__builtin_ia32_vfmsubpd128_mask3((__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m128d _mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmsubpd128_maskz((__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m256 _mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmsubps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m256 _mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) { return (__m256)__builtin_ia32_vfmsubps256_mask3((__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m256 _mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmsubps256_maskz((__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m128 _mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmsubps128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m128 _mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) { return (__m128)__builtin_ia32_vfmsubps128_mask3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m128 _mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmsubps128_maskz((__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m256d _mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmaddsubpd256_mask( (__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m256d _mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) { return (__m256d)__builtin_ia32_vfmaddsubpd256_mask3( (__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m256d _mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmaddsubpd256_maskz( (__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m128d _mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) { 
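/* fmaddsub alternates per lane: even-indexed lanes compute a*b - c and
   odd-indexed lanes compute a*b + c; fmsubadd is the reverse. */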
return (__m128d)__builtin_ia32_vfmaddsubpd128_mask( (__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m128d _mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) { return (__m128d)__builtin_ia32_vfmaddsubpd128_mask3( (__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m128d _mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddsubpd128_maskz( (__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m256 _mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmaddsubps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m256 _mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) { return (__m256)__builtin_ia32_vfmaddsubps256_mask3( (__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m256 _mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmaddsubps256_maskz( (__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m128 _mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddsubps128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m128 _mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) { return (__m128)__builtin_ia32_vfmaddsubps128_mask3( (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m128 _mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddsubps128_maskz( (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m256d _mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmaddsubpd256_mask( (__v4df)__A, (__v4df)__B, -(__v4df)__C, (__mmask8)__U); } __funline __m256d _mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) { return (__m256d)__builtin_ia32_vfmsubaddpd256_mask3( (__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m256d _mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfmaddsubpd256_maskz( (__v4df)__A, (__v4df)__B, -(__v4df)__C, (__mmask8)__U); } __funline __m128d _mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddsubpd128_mask( (__v2df)__A, (__v2df)__B, -(__v2df)__C, (__mmask8)__U); } __funline __m128d _mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) { return (__m128d)__builtin_ia32_vfmsubaddpd128_mask3( (__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m128d _mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfmaddsubpd128_maskz( (__v2df)__A, (__v2df)__B, -(__v2df)__C, (__mmask8)__U); } __funline __m256 _mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmaddsubps256_mask( (__v8sf)__A, (__v8sf)__B, -(__v8sf)__C, (__mmask8)__U); } __funline __m256 _mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) { return (__m256)__builtin_ia32_vfmsubaddps256_mask3( (__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m256 _mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfmaddsubps256_maskz( (__v8sf)__A, (__v8sf)__B, -(__v8sf)__C, 
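/* The _mask and _maskz fmsubadd forms are synthesized from the fmaddsub
   builtin with a negated __C; _mask3 must use the dedicated vfmsubadd
   builtin because __C itself is the merge source there. */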
(__mmask8)__U); } __funline __m128 _mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddsubps128_mask( (__v4sf)__A, (__v4sf)__B, -(__v4sf)__C, (__mmask8)__U); } __funline __m128 _mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) { return (__m128)__builtin_ia32_vfmsubaddps128_mask3( (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m128 _mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfmaddsubps128_maskz( (__v4sf)__A, (__v4sf)__B, -(__v4sf)__C, (__mmask8)__U); } __funline __m256d _mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfnmaddpd256_mask((__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m256d _mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) { return (__m256d)__builtin_ia32_vfnmaddpd256_mask3((__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m256d _mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfnmaddpd256_maskz((__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m128d _mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfnmaddpd128_mask((__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m128d _mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) { return (__m128d)__builtin_ia32_vfnmaddpd128_mask3((__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m128d _mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfnmaddpd128_maskz((__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m256 _mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfnmaddps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m256 _mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) { return (__m256)__builtin_ia32_vfnmaddps256_mask3((__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m256 _mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfnmaddps256_maskz((__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m128 _mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfnmaddps128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m128 _mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) { return (__m128)__builtin_ia32_vfnmaddps128_mask3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m128 _mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfnmaddps128_maskz((__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m256d _mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) { return (__m256d)__builtin_ia32_vfnmsubpd256_mask((__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m256d _mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) { return (__m256d)__builtin_ia32_vfnmsubpd256_mask3((__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m256d _mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) { return 
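/* fnmadd computes -(a*b) + c per lane; fnmsub computes -(a*b) - c. */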
(__m256d)__builtin_ia32_vfnmsubpd256_maskz((__v4df)__A, (__v4df)__B, (__v4df)__C, (__mmask8)__U); } __funline __m128d _mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfnmsubpd128_mask((__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m128d _mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) { return (__m128d)__builtin_ia32_vfnmsubpd128_mask3((__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m128d _mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) { return (__m128d)__builtin_ia32_vfnmsubpd128_maskz((__v2df)__A, (__v2df)__B, (__v2df)__C, (__mmask8)__U); } __funline __m256 _mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfnmsubps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m256 _mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) { return (__m256)__builtin_ia32_vfnmsubps256_mask3((__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m256 _mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) { return (__m256)__builtin_ia32_vfnmsubps256_maskz((__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); } __funline __m128 _mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfnmsubps128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m128 _mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) { return (__m128)__builtin_ia32_vfnmsubps128_mask3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m128 _mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) { return (__m128)__builtin_ia32_vfnmsubps128_maskz((__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); } __funline __m128i _mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pandd128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pandd128_mask( (__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pandnd256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pandnd256_mask( (__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pandnd128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_andnot_epi32(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pandnd128_mask( (__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_or_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pord256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pord256_mask( (__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m256i _mm256_or_epi32(__m256i __A, __m256i __B) { return 
(__m256i)((__v8su)__A | (__v8su)__B); } __funline __m128i _mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pord128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pord128_mask( (__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_or_epi32(__m128i __A, __m128i __B) { return (__m128i)((__v4su)__A | (__v4su)__B); } __funline __m256i _mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pxord256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pxord256_mask( (__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m256i _mm256_xor_epi32(__m256i __A, __m256i __B) { return (__m256i)((__v8su)__A ^ (__v8su)__B); } __funline __m128i _mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pxord128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pxord128_mask( (__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_xor_epi32(__m128i __A, __m128i __B) { return (__m128i)((__v4su)__A ^ (__v4su)__B); } __funline __m128 _mm_mask_cvtpd_ps(__m128 __W, __mmask8 __U, __m128d __A) { return (__m128)__builtin_ia32_cvtpd2ps_mask((__v2df)__A, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_cvtpd_ps(__mmask8 __U, __m128d __A) { return (__m128)__builtin_ia32_cvtpd2ps_mask( (__v2df)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm256_mask_cvtpd_ps(__m128 __W, __mmask8 __U, __m256d __A) { return (__m128)__builtin_ia32_cvtpd2ps256_mask((__v4df)__A, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm256_maskz_cvtpd_ps(__mmask8 __U, __m256d __A) { return (__m128)__builtin_ia32_cvtpd2ps256_mask( (__v4df)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m256i _mm256_mask_cvtps_epi32(__m256i __W, __mmask8 __U, __m256 __A) { return (__m256i)__builtin_ia32_cvtps2dq256_mask((__v8sf)__A, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_cvtps_epi32(__mmask8 __U, __m256 __A) { return (__m256i)__builtin_ia32_cvtps2dq256_mask( (__v8sf)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_cvtps_epi32(__m128i __W, __mmask8 __U, __m128 __A) { return (__m128i)__builtin_ia32_cvtps2dq128_mask((__v4sf)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvtps_epi32(__mmask8 __U, __m128 __A) { return (__m128i)__builtin_ia32_cvtps2dq128_mask( (__v4sf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_cvtps_epu32(__m256 __A) { return (__m256i)__builtin_ia32_cvtps2udq256_mask( (__v8sf)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)-1); } __funline __m256i _mm256_mask_cvtps_epu32(__m256i __W, __mmask8 __U, __m256 __A) { return (__m256i)__builtin_ia32_cvtps2udq256_mask((__v8sf)__A, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_cvtps_epu32(__mmask8 __U, __m256 __A) { return (__m256i)__builtin_ia32_cvtps2udq256_mask( (__v8sf)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_cvtps_epu32(__m128 __A) { return (__m128i)__builtin_ia32_cvtps2udq128_mask( 
(__v4sf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm_mask_cvtps_epu32(__m128i __W, __mmask8 __U, __m128 __A) { return (__m128i)__builtin_ia32_cvtps2udq128_mask((__v4sf)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_cvtps_epu32(__mmask8 __U, __m128 __A) { return (__m128i)__builtin_ia32_cvtps2udq128_mask( (__v4sf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256d _mm256_mask_movedup_pd(__m256d __W, __mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_movddup256_mask((__v4df)__A, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_movedup_pd(__mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_movddup256_mask( (__v4df)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m128d _mm_mask_movedup_pd(__m128d __W, __mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_movddup128_mask((__v2df)__A, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_movedup_pd(__mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_movddup128_mask( (__v2df)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m256 _mm256_mask_movehdup_ps(__m256 __W, __mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_movshdup256_mask((__v8sf)__A, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_movehdup_ps(__mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_movshdup256_mask( (__v8sf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_mask_movehdup_ps(__m128 __W, __mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_movshdup128_mask((__v4sf)__A, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_movehdup_ps(__mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_movshdup128_mask( (__v4sf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m256 _mm256_mask_moveldup_ps(__m256 __W, __mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_movsldup256_mask((__v8sf)__A, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_moveldup_ps(__mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_movsldup256_mask( (__v8sf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_mask_moveldup_ps(__m128 __W, __mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_movsldup128_mask((__v4sf)__A, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_moveldup_ps(__mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_movsldup128_mask( (__v4sf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m128i _mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpckhdq128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpckhdq128_mask( (__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpckhdq256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpckhdq256_mask( (__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpckhqdq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__W, (__mmask8)__U); } __funline __m128i 
_mm_maskz_unpackhi_epi64(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpckhqdq128_mask( (__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpckhqdq256_mask((__v4di)__A, (__v4di)__B, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpckhqdq256_mask( (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpckldq128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpckldq128_mask( (__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpckldq256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpckldq256_mask( (__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpcklqdq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_unpacklo_epi64(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_punpcklqdq128_mask( (__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpcklqdq256_mask((__v4di)__A, (__v4di)__B, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_punpcklqdq256_mask( (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __mmask8 _mm_cmpeq_epu32_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__A, (__v4si)__B, 0, (__mmask8)-1); } __funline __mmask8 _mm_cmpeq_epi32_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_pcmpeqd128_mask((__v4si)__A, (__v4si)__B, (__mmask8)-1); } __funline __mmask8 _mm_mask_cmpeq_epu32_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__A, (__v4si)__B, 0, __U); } __funline __mmask8 _mm_mask_cmpeq_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_pcmpeqd128_mask((__v4si)__A, (__v4si)__B, __U); } __funline __mmask8 _mm256_cmpeq_epu32_mask(__m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__A, (__v8si)__B, 0, (__mmask8)-1); } __funline __mmask8 _mm256_cmpeq_epi32_mask(__m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_pcmpeqd256_mask((__v8si)__A, (__v8si)__B, (__mmask8)-1); } __funline __mmask8 _mm256_mask_cmpeq_epu32_mask(__mmask8 __U, __m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__A, (__v8si)__B, 0, __U); } __funline __mmask8 _mm256_mask_cmpeq_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B) { return 
(__mmask8)__builtin_ia32_pcmpeqd256_mask((__v8si)__A, (__v8si)__B, __U); } __funline __mmask8 _mm_cmpeq_epu64_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__A, (__v2di)__B, 0, (__mmask8)-1); } __funline __mmask8 _mm_cmpeq_epi64_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_pcmpeqq128_mask((__v2di)__A, (__v2di)__B, (__mmask8)-1); } __funline __mmask8 _mm_mask_cmpeq_epu64_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__A, (__v2di)__B, 0, __U); } __funline __mmask8 _mm_mask_cmpeq_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_pcmpeqq128_mask((__v2di)__A, (__v2di)__B, __U); } __funline __mmask8 _mm256_cmpeq_epu64_mask(__m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__A, (__v4di)__B, 0, (__mmask8)-1); } __funline __mmask8 _mm256_cmpeq_epi64_mask(__m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_pcmpeqq256_mask((__v4di)__A, (__v4di)__B, (__mmask8)-1); } __funline __mmask8 _mm256_mask_cmpeq_epu64_mask(__mmask8 __U, __m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__A, (__v4di)__B, 0, __U); } __funline __mmask8 _mm256_mask_cmpeq_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_pcmpeqq256_mask((__v4di)__A, (__v4di)__B, __U); } __funline __mmask8 _mm_cmpgt_epu32_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__A, (__v4si)__B, 6, (__mmask8)-1); } __funline __mmask8 _mm_cmpgt_epi32_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_pcmpgtd128_mask((__v4si)__A, (__v4si)__B, (__mmask8)-1); } __funline __mmask8 _mm_mask_cmpgt_epu32_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__A, (__v4si)__B, 6, __U); } __funline __mmask8 _mm_mask_cmpgt_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_pcmpgtd128_mask((__v4si)__A, (__v4si)__B, __U); } __funline __mmask8 _mm256_cmpgt_epu32_mask(__m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__A, (__v8si)__B, 6, (__mmask8)-1); } __funline __mmask8 _mm256_cmpgt_epi32_mask(__m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_pcmpgtd256_mask((__v8si)__A, (__v8si)__B, (__mmask8)-1); } __funline __mmask8 _mm256_mask_cmpgt_epu32_mask(__mmask8 __U, __m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__A, (__v8si)__B, 6, __U); } __funline __mmask8 _mm256_mask_cmpgt_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_pcmpgtd256_mask((__v8si)__A, (__v8si)__B, __U); } __funline __mmask8 _mm_cmpgt_epu64_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__A, (__v2di)__B, 6, (__mmask8)-1); } __funline __mmask8 _mm_cmpgt_epi64_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_pcmpgtq128_mask((__v2di)__A, (__v2di)__B, (__mmask8)-1); } __funline __mmask8 _mm_mask_cmpgt_epu64_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__A, (__v2di)__B, 6, __U); } __funline __mmask8 _mm_mask_cmpgt_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_pcmpgtq128_mask((__v2di)__A, (__v2di)__B, __U); } __funline __mmask8 _mm256_cmpgt_epu64_mask(__m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__A, (__v4di)__B, 6, (__mmask8)-1); } 
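/* Note: the unsigned compares above and below go through the generic
   __builtin_ia32_ucmp{d,q}*_mask builtins, whose immediate argument is an
   _MM_CMPINT_* predicate: 0 selects equality (_MM_CMPINT_EQ) and 6 selects
   not-less-or-equal, i.e. greater-than (_MM_CMPINT_NLE). */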
__funline __mmask8 _mm256_cmpgt_epi64_mask(__m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_pcmpgtq256_mask((__v4di)__A, (__v4di)__B, (__mmask8)-1); } __funline __mmask8 _mm256_mask_cmpgt_epu64_mask(__mmask8 __U, __m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__A, (__v4di)__B, 6, __U); } __funline __mmask8 _mm256_mask_cmpgt_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_pcmpgtq256_mask((__v4di)__A, (__v4di)__B, __U); } __funline __mmask8 _mm_test_epi32_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ptestmd128((__v4si)__A, (__v4si)__B, (__mmask8)-1); } __funline __mmask8 _mm_mask_test_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ptestmd128((__v4si)__A, (__v4si)__B, __U); } __funline __mmask8 _mm256_test_epi32_mask(__m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ptestmd256((__v8si)__A, (__v8si)__B, (__mmask8)-1); } __funline __mmask8 _mm256_mask_test_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ptestmd256((__v8si)__A, (__v8si)__B, __U); } __funline __mmask8 _mm_test_epi64_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ptestmq128((__v2di)__A, (__v2di)__B, (__mmask8)-1); } __funline __mmask8 _mm_mask_test_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ptestmq128((__v2di)__A, (__v2di)__B, __U); } __funline __mmask8 _mm256_test_epi64_mask(__m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ptestmq256((__v4di)__A, (__v4di)__B, (__mmask8)-1); } __funline __mmask8 _mm256_mask_test_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ptestmq256((__v4di)__A, (__v4di)__B, __U); } __funline __mmask8 _mm_testn_epi32_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ptestnmd128((__v4si)__A, (__v4si)__B, (__mmask8)-1); } __funline __mmask8 _mm_mask_testn_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ptestnmd128((__v4si)__A, (__v4si)__B, __U); } __funline __mmask8 _mm256_testn_epi32_mask(__m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ptestnmd256((__v8si)__A, (__v8si)__B, (__mmask8)-1); } __funline __mmask8 _mm256_mask_testn_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ptestnmd256((__v8si)__A, (__v8si)__B, __U); } __funline __mmask8 _mm_testn_epi64_mask(__m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ptestnmq128((__v2di)__A, (__v2di)__B, (__mmask8)-1); } __funline __mmask8 _mm_mask_testn_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B) { return (__mmask8)__builtin_ia32_ptestnmq128((__v2di)__A, (__v2di)__B, __U); } __funline __mmask8 _mm256_testn_epi64_mask(__m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ptestnmq256((__v4di)__A, (__v4di)__B, (__mmask8)-1); } __funline __mmask8 _mm256_mask_testn_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B) { return (__mmask8)__builtin_ia32_ptestnmq256((__v4di)__A, (__v4di)__B, __U); } __funline __m256d _mm256_mask_compress_pd(__m256d __W, __mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_compressdf256_mask((__v4df)__A, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_compress_pd(__mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_compressdf256_mask( (__v4df)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline void _mm256_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m256d __A) { 
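  /* Compress-store: the lanes selected by __U are packed together and
     written contiguously at __P; unselected lanes leave memory untouched. */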
__builtin_ia32_compressstoredf256_mask((__v4df *)__P, (__v4df)__A, (__mmask8)__U); } __funline __m128d _mm_mask_compress_pd(__m128d __W, __mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_compressdf128_mask((__v2df)__A, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_compress_pd(__mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_compressdf128_mask( (__v2df)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline void _mm_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m128d __A) { __builtin_ia32_compressstoredf128_mask((__v2df *)__P, (__v2df)__A, (__mmask8)__U); } __funline __m256 _mm256_mask_compress_ps(__m256 __W, __mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_compresssf256_mask((__v8sf)__A, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_compress_ps(__mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_compresssf256_mask( (__v8sf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline void _mm256_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m256 __A) { __builtin_ia32_compressstoresf256_mask((__v8sf *)__P, (__v8sf)__A, (__mmask8)__U); } __funline __m128 _mm_mask_compress_ps(__m128 __W, __mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_compresssf128_mask((__v4sf)__A, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_compress_ps(__mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_compresssf128_mask( (__v4sf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline void _mm_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m128 __A) { __builtin_ia32_compressstoresf128_mask((__v4sf *)__P, (__v4sf)__A, (__mmask8)__U); } __funline __m256i _mm256_mask_compress_epi64(__m256i __W, __mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_compressdi256_mask((__v4di)__A, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_compress_epi64(__mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_compressdi256_mask( (__v4di)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline void _mm256_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m256i __A) { __builtin_ia32_compressstoredi256_mask((__v4di *)__P, (__v4di)__A, (__mmask8)__U); } __funline __m128i _mm_mask_compress_epi64(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_compressdi128_mask((__v2di)__A, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_compress_epi64(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_compressdi128_mask( (__v2di)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline void _mm_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m128i __A) { __builtin_ia32_compressstoredi128_mask((__v2di *)__P, (__v2di)__A, (__mmask8)__U); } __funline __m256i _mm256_mask_compress_epi32(__m256i __W, __mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_compresssi256_mask((__v8si)__A, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_compress_epi32(__mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_compresssi256_mask( (__v8si)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline void _mm256_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m256i __A) { __builtin_ia32_compressstoresi256_mask((__v8si *)__P, (__v8si)__A, (__mmask8)__U); } __funline __m128i _mm_mask_compress_epi32(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_compresssi128_mask((__v4si)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_compress_epi32(__mmask8 __U, __m128i __A) { return 
(__m128i)__builtin_ia32_compresssi128_mask( (__v4si)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline void _mm_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m128i __A) { __builtin_ia32_compressstoresi128_mask((__v4si *)__P, (__v4si)__A, (__mmask8)__U); } __funline __m256d _mm256_mask_expand_pd(__m256d __W, __mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_expanddf256_mask((__v4df)__A, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_expand_pd(__mmask8 __U, __m256d __A) { return (__m256d)__builtin_ia32_expanddf256_maskz( (__v4df)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m256d _mm256_mask_expandloadu_pd(__m256d __W, __mmask8 __U, void const *__P) { return (__m256d)__builtin_ia32_expandloaddf256_mask( (__v4df *)__P, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_expandloadu_pd(__mmask8 __U, void const *__P) { return (__m256d)__builtin_ia32_expandloaddf256_maskz( (__v4df *)__P, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m128d _mm_mask_expand_pd(__m128d __W, __mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_expanddf128_mask((__v2df)__A, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_expand_pd(__mmask8 __U, __m128d __A) { return (__m128d)__builtin_ia32_expanddf128_maskz( (__v2df)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m128d _mm_mask_expandloadu_pd(__m128d __W, __mmask8 __U, void const *__P) { return (__m128d)__builtin_ia32_expandloaddf128_mask( (__v2df *)__P, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_expandloadu_pd(__mmask8 __U, void const *__P) { return (__m128d)__builtin_ia32_expandloaddf128_maskz( (__v2df *)__P, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m256 _mm256_mask_expand_ps(__m256 __W, __mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_expandsf256_mask((__v8sf)__A, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_expand_ps(__mmask8 __U, __m256 __A) { return (__m256)__builtin_ia32_expandsf256_maskz( (__v8sf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m256 _mm256_mask_expandloadu_ps(__m256 __W, __mmask8 __U, void const *__P) { return (__m256)__builtin_ia32_expandloadsf256_mask((__v8sf *)__P, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_expandloadu_ps(__mmask8 __U, void const *__P) { return (__m256)__builtin_ia32_expandloadsf256_maskz( (__v8sf *)__P, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_mask_expand_ps(__m128 __W, __mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_expandsf128_mask((__v4sf)__A, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_expand_ps(__mmask8 __U, __m128 __A) { return (__m128)__builtin_ia32_expandsf128_maskz( (__v4sf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_mask_expandloadu_ps(__m128 __W, __mmask8 __U, void const *__P) { return (__m128)__builtin_ia32_expandloadsf128_mask((__v4sf *)__P, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_expandloadu_ps(__mmask8 __U, void const *__P) { return (__m128)__builtin_ia32_expandloadsf128_maskz( (__v4sf *)__P, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m256i _mm256_mask_expand_epi64(__m256i __W, __mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_expanddi256_mask((__v4di)__A, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_expand_epi64(__mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_expanddi256_maskz( (__v4di)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m256i 
_mm256_mask_expandloadu_epi64(__m256i __W, __mmask8 __U, void const *__P) { return (__m256i)__builtin_ia32_expandloaddi256_mask( (__v4di *)__P, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_expandloadu_epi64(__mmask8 __U, void const *__P) { return (__m256i)__builtin_ia32_expandloaddi256_maskz( (__v4di *)__P, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_expand_epi64(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_expanddi128_mask((__v2di)__A, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_expand_epi64(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_expanddi128_maskz( (__v2di)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_mask_expandloadu_epi64(__m128i __W, __mmask8 __U, void const *__P) { return (__m128i)__builtin_ia32_expandloaddi128_mask( (__v2di *)__P, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_expandloadu_epi64(__mmask8 __U, void const *__P) { return (__m128i)__builtin_ia32_expandloaddi128_maskz( (__v2di *)__P, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_expand_epi32(__m256i __W, __mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_expandsi256_mask((__v8si)__A, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_expand_epi32(__mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_expandsi256_maskz( (__v8si)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m256i _mm256_mask_expandloadu_epi32(__m256i __W, __mmask8 __U, void const *__P) { return (__m256i)__builtin_ia32_expandloadsi256_mask( (__v8si *)__P, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_expandloadu_epi32(__mmask8 __U, void const *__P) { return (__m256i)__builtin_ia32_expandloadsi256_maskz( (__v8si *)__P, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_expand_epi32(__m128i __W, __mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_expandsi128_mask((__v4si)__A, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_expand_epi32(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_expandsi128_maskz( (__v4si)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_mask_expandloadu_epi32(__m128i __W, __mmask8 __U, void const *__P) { return (__m128i)__builtin_ia32_expandloadsi128_mask( (__v4si *)__P, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_expandloadu_epi32(__mmask8 __U, void const *__P) { return (__m128i)__builtin_ia32_expandloadsi128_maskz( (__v4si *)__P, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256d _mm256_permutex2var_pd(__m256d __A, __m256i __I, __m256d __B) { return (__m256d)__builtin_ia32_vpermt2varpd256_mask((__v4di)__I /* idx */, (__v4df)__A, (__v4df)__B, (__mmask8)-1); } __funline __m256d _mm256_mask_permutex2var_pd(__m256d __A, __mmask8 __U, __m256i __I, __m256d __B) { return (__m256d)__builtin_ia32_vpermt2varpd256_mask((__v4di)__I /* idx */, (__v4df)__A, (__v4df)__B, (__mmask8)__U); } __funline __m256d _mm256_mask2_permutex2var_pd(__m256d __A, __m256i __I, __mmask8 __U, __m256d __B) { return (__m256d)__builtin_ia32_vpermi2varpd256_mask((__v4df)__A, (__v4di)__I /* idx */, (__v4df)__B, (__mmask8)__U); } __funline __m256d _mm256_maskz_permutex2var_pd(__mmask8 __U, __m256d __A, __m256i __I, __m256d __B) { return (__m256d)__builtin_ia32_vpermt2varpd256_maskz((__v4di)__I /* idx */, (__v4df)__A, (__v4df)__B, (__mmask8)__U); } __funline __m256 _mm256_permutex2var_ps(__m256 __A, __m256i __I, __m256 __B) { 
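  /* Two-source permute: each index in __I picks one of the 16 floats in the
     concatenation of __A and __B (the low bits select the lane, the next
     bit selects which source vector). */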
return (__m256)__builtin_ia32_vpermt2varps256_mask((__v8si)__I /* idx */, (__v8sf)__A, (__v8sf)__B, (__mmask8)-1); } __funline __m256 _mm256_mask_permutex2var_ps(__m256 __A, __mmask8 __U, __m256i __I, __m256 __B) { return (__m256)__builtin_ia32_vpermt2varps256_mask((__v8si)__I /* idx */, (__v8sf)__A, (__v8sf)__B, (__mmask8)__U); } __funline __m256 _mm256_mask2_permutex2var_ps(__m256 __A, __m256i __I, __mmask8 __U, __m256 __B) { return (__m256)__builtin_ia32_vpermi2varps256_mask((__v8sf)__A, (__v8si)__I /* idx */, (__v8sf)__B, (__mmask8)__U); } __funline __m256 _mm256_maskz_permutex2var_ps(__mmask8 __U, __m256 __A, __m256i __I, __m256 __B) { return (__m256)__builtin_ia32_vpermt2varps256_maskz((__v8si)__I /* idx */, (__v8sf)__A, (__v8sf)__B, (__mmask8)__U); } __funline __m128i _mm_permutex2var_epi64(__m128i __A, __m128i __I, __m128i __B) { return (__m128i)__builtin_ia32_vpermt2varq128_mask((__v2di)__I /* idx */, (__v2di)__A, (__v2di)__B, (__mmask8)-1); } __funline __m128i _mm_mask_permutex2var_epi64(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B) { return (__m128i)__builtin_ia32_vpermt2varq128_mask((__v2di)__I /* idx */, (__v2di)__A, (__v2di)__B, (__mmask8)__U); } __funline __m128i _mm_mask2_permutex2var_epi64(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B) { return (__m128i)__builtin_ia32_vpermi2varq128_mask((__v2di)__A, (__v2di)__I /* idx */, (__v2di)__B, (__mmask8)__U); } __funline __m128i _mm_maskz_permutex2var_epi64(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B) { return (__m128i)__builtin_ia32_vpermt2varq128_maskz((__v2di)__I /* idx */, (__v2di)__A, (__v2di)__B, (__mmask8)__U); } __funline __m128i _mm_permutex2var_epi32(__m128i __A, __m128i __I, __m128i __B) { return (__m128i)__builtin_ia32_vpermt2vard128_mask((__v4si)__I /* idx */, (__v4si)__A, (__v4si)__B, (__mmask8)-1); } __funline __m128i _mm_mask_permutex2var_epi32(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B) { return (__m128i)__builtin_ia32_vpermt2vard128_mask((__v4si)__I /* idx */, (__v4si)__A, (__v4si)__B, (__mmask8)__U); } __funline __m128i _mm_mask2_permutex2var_epi32(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B) { return (__m128i)__builtin_ia32_vpermi2vard128_mask((__v4si)__A, (__v4si)__I /* idx */, (__v4si)__B, (__mmask8)__U); } __funline __m128i _mm_maskz_permutex2var_epi32(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B) { return (__m128i)__builtin_ia32_vpermt2vard128_maskz((__v4si)__I /* idx */, (__v4si)__A, (__v4si)__B, (__mmask8)__U); } __funline __m256i _mm256_permutex2var_epi64(__m256i __A, __m256i __I, __m256i __B) { return (__m256i)__builtin_ia32_vpermt2varq256_mask((__v4di)__I /* idx */, (__v4di)__A, (__v4di)__B, (__mmask8)-1); } __funline __m256i _mm256_mask_permutex2var_epi64(__m256i __A, __mmask8 __U, __m256i __I, __m256i __B) { return (__m256i)__builtin_ia32_vpermt2varq256_mask((__v4di)__I /* idx */, (__v4di)__A, (__v4di)__B, (__mmask8)__U); } __funline __m256i _mm256_mask2_permutex2var_epi64(__m256i __A, __m256i __I, __mmask8 __U, __m256i __B) { return (__m256i)__builtin_ia32_vpermi2varq256_mask((__v4di)__A, (__v4di)__I /* idx */, (__v4di)__B, (__mmask8)__U); } __funline __m256i _mm256_maskz_permutex2var_epi64(__mmask8 __U, __m256i __A, __m256i __I, __m256i __B) { return (__m256i)__builtin_ia32_vpermt2varq256_maskz((__v4di)__I /* idx */, (__v4di)__A, (__v4di)__B, (__mmask8)__U); } __funline __m256i _mm256_permutex2var_epi32(__m256i __A, __m256i __I, __m256i __B) { return (__m256i)__builtin_ia32_vpermt2vard256_mask((__v8si)__I /* idx */, (__v8si)__A, (__v8si)__B, 
(__mmask8)-1); } __funline __m256i _mm256_mask_permutex2var_epi32(__m256i __A, __mmask8 __U, __m256i __I, __m256i __B) { return (__m256i)__builtin_ia32_vpermt2vard256_mask((__v8si)__I /* idx */, (__v8si)__A, (__v8si)__B, (__mmask8)__U); } __funline __m256i _mm256_mask2_permutex2var_epi32(__m256i __A, __m256i __I, __mmask8 __U, __m256i __B) { return (__m256i)__builtin_ia32_vpermi2vard256_mask((__v8si)__A, (__v8si)__I /* idx */, (__v8si)__B, (__mmask8)__U); } __funline __m256i _mm256_maskz_permutex2var_epi32(__mmask8 __U, __m256i __A, __m256i __I, __m256i __B) { return (__m256i)__builtin_ia32_vpermt2vard256_maskz((__v8si)__I /* idx */, (__v8si)__A, (__v8si)__B, (__mmask8)__U); } __funline __m128d _mm_permutex2var_pd(__m128d __A, __m128i __I, __m128d __B) { return (__m128d)__builtin_ia32_vpermt2varpd128_mask((__v2di)__I /* idx */, (__v2df)__A, (__v2df)__B, (__mmask8)-1); } __funline __m128d _mm_mask_permutex2var_pd(__m128d __A, __mmask8 __U, __m128i __I, __m128d __B) { return (__m128d)__builtin_ia32_vpermt2varpd128_mask((__v2di)__I /* idx */, (__v2df)__A, (__v2df)__B, (__mmask8)__U); } __funline __m128d _mm_mask2_permutex2var_pd(__m128d __A, __m128i __I, __mmask8 __U, __m128d __B) { return (__m128d)__builtin_ia32_vpermi2varpd128_mask((__v2df)__A, (__v2di)__I /* idx */, (__v2df)__B, (__mmask8)__U); } __funline __m128d _mm_maskz_permutex2var_pd(__mmask8 __U, __m128d __A, __m128i __I, __m128d __B) { return (__m128d)__builtin_ia32_vpermt2varpd128_maskz((__v2di)__I /* idx */, (__v2df)__A, (__v2df)__B, (__mmask8)__U); } __funline __m128 _mm_permutex2var_ps(__m128 __A, __m128i __I, __m128 __B) { return (__m128)__builtin_ia32_vpermt2varps128_mask((__v4si)__I /* idx */, (__v4sf)__A, (__v4sf)__B, (__mmask8)-1); } __funline __m128 _mm_mask_permutex2var_ps(__m128 __A, __mmask8 __U, __m128i __I, __m128 __B) { return (__m128)__builtin_ia32_vpermt2varps128_mask((__v4si)__I /* idx */, (__v4sf)__A, (__v4sf)__B, (__mmask8)__U); } __funline __m128 _mm_mask2_permutex2var_ps(__m128 __A, __m128i __I, __mmask8 __U, __m128 __B) { return (__m128)__builtin_ia32_vpermi2varps128_mask((__v4sf)__A, (__v4si)__I /* idx */, (__v4sf)__B, (__mmask8)__U); } __funline __m128 _mm_maskz_permutex2var_ps(__mmask8 __U, __m128 __A, __m128i __I, __m128 __B) { return (__m128)__builtin_ia32_vpermt2varps128_maskz((__v4si)__I /* idx */, (__v4sf)__A, (__v4sf)__B, (__mmask8)__U); } __funline __m128i _mm_srav_epi64(__m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psravq128_mask( (__v2di)__X, (__v2di)__Y, (__v2di)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm_mask_srav_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psravq128_mask((__v2di)__X, (__v2di)__Y, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_srav_epi64(__mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psravq128_mask( (__v2di)__X, (__v2di)__Y, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_psllv8si_mask((__v8si)__X, (__v8si)__Y, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_psllv8si_mask( (__v8si)__X, (__v8si)__Y, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psllv4si_mask((__v4si)__X, (__v4si)__Y, (__v4si)__W, (__mmask8)__U); } 
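/* As with the other masked shifts, the _mask_ forms merge unselected lanes
   from __W while the _maskz_ forms zero them. */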
__funline __m128i _mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psllv4si_mask( (__v4si)__X, (__v4si)__Y, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_psllv4di_mask((__v4di)__X, (__v4di)__Y, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_psllv4di_mask( (__v4di)__X, (__v4di)__Y, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psllv2di_mask((__v2di)__X, (__v2di)__Y, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psllv2di_mask( (__v2di)__X, (__v2di)__Y, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_psrav8si_mask((__v8si)__X, (__v8si)__Y, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_psrav8si_mask( (__v8si)__X, (__v8si)__Y, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psrav4si_mask((__v4si)__X, (__v4si)__Y, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psrav4si_mask( (__v4si)__X, (__v4si)__Y, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_psrlv8si_mask((__v8si)__X, (__v8si)__Y, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_psrlv8si_mask( (__v8si)__X, (__v8si)__Y, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psrlv4si_mask((__v4si)__X, (__v4si)__Y, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psrlv4si_mask( (__v4si)__X, (__v4si)__Y, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_psrlv4di_mask((__v4di)__X, (__v4di)__Y, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_psrlv4di_mask( (__v4di)__X, (__v4di)__Y, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psrlv2di_mask((__v2di)__X, (__v2di)__Y, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_psrlv2di_mask( (__v2di)__X, (__v2di)__Y, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_rolv_epi32(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_prolvd256_mask( (__v8si)__A, (__v8si)__B, 
(__v8si)_mm256_setzero_si256(), (__mmask8)-1); } __funline __m256i _mm256_mask_rolv_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_prolvd256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_rolv_epi32(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_prolvd256_mask( (__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_rolv_epi32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_prolvd128_mask( (__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm_mask_rolv_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_prolvd128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_rolv_epi32(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_prolvd128_mask( (__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_rorv_epi32(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_prorvd256_mask( (__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), (__mmask8)-1); } __funline __m256i _mm256_mask_rorv_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_prorvd256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_rorv_epi32(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_prorvd256_mask( (__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_rorv_epi32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_prorvd128_mask( (__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm_mask_rorv_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_prorvd128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_rorv_epi32(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_prorvd128_mask( (__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_rolv_epi64(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_prolvq256_mask( (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)-1); } __funline __m256i _mm256_mask_rolv_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_prolvq256_mask((__v4di)__A, (__v4di)__B, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_rolv_epi64(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_prolvq256_mask( (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m128i _mm_rolv_epi64(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_prolvq128_mask( (__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)-1); } __funline __m128i _mm_mask_rolv_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_prolvq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_rolv_epi64(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_prolvq128_mask( (__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_rorv_epi64(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_prorvq256_mask( (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)-1); 
}

__funline __m256i _mm256_mask_rorv_epi64(__m256i __W, __mmask8 __U, __m256i __A,
                                         __m256i __B) {
  return (__m256i)__builtin_ia32_prorvq256_mask((__v4di)__A, (__v4di)__B,
                                                (__v4di)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_rorv_epi64(__mmask8 __U, __m256i __A,
                                          __m256i __B) {
  return (__m256i)__builtin_ia32_prorvq256_mask(
      (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m128i _mm_rorv_epi64(__m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_prorvq128_mask(
      (__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm_mask_rorv_epi64(__m128i __W, __mmask8 __U, __m128i __A,
                                      __m128i __B) {
  return (__m128i)__builtin_ia32_prorvq128_mask((__v2di)__A, (__v2di)__B,
                                                (__v2di)__W, (__mmask8)__U);
}

__funline __m128i _mm_maskz_rorv_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_prorvq128_mask(
      (__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m256i _mm256_srav_epi64(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_psravq256_mask(
      (__v4di)__X, (__v4di)__Y, (__v4di)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_srav_epi64(__m256i __W, __mmask8 __U, __m256i __X,
                                         __m256i __Y) {
  return (__m256i)__builtin_ia32_psravq256_mask((__v4di)__X, (__v4di)__Y,
                                                (__v4di)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_srav_epi64(__mmask8 __U, __m256i __X,
                                          __m256i __Y) {
  return (__m256i)__builtin_ia32_psravq256_mask(
      (__v4di)__X, (__v4di)__Y, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m256i _mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A,
                                        __m256i __B) {
  return (__m256i)__builtin_ia32_pandq256_mask((__v4di)__A, (__v4di)__B,
                                               (__v4di)__W, __U);
}

__funline __m256i _mm256_maskz_and_epi64(__mmask8 __U, __m256i __A,
                                         __m256i __B) {
  return (__m256i)__builtin_ia32_pandq256_mask(
      (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), __U);
}

__funline __m128i _mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A,
                                     __m128i __B) {
  return (__m128i)__builtin_ia32_pandq128_mask((__v2di)__A, (__v2di)__B,
                                               (__v2di)__W, __U);
}

__funline __m128i _mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pandq128_mask(
      (__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), __U);
}

__funline __m256i _mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U,
                                           __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pandnq256_mask((__v4di)__A, (__v4di)__B,
                                                (__v4di)__W, __U);
}

__funline __m256i _mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A,
                                            __m256i __B) {
  return (__m256i)__builtin_ia32_pandnq256_mask(
      (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), __U);
}

__funline __m128i _mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A,
                                        __m128i __B) {
  return (__m128i)__builtin_ia32_pandnq128_mask((__v2di)__A, (__v2di)__B,
                                                (__v2di)__W, __U);
}

__funline __m128i _mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A,
                                         __m128i __B) {
  return (__m128i)__builtin_ia32_pandnq128_mask(
      (__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), __U);
}

__funline __m256i _mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A,
                                       __m256i __B) {
  return (__m256i)__builtin_ia32_porq256_mask((__v4di)__A, (__v4di)__B,
                                              (__v4di)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_or_epi64(__mmask8 __U, __m256i __A,
                                        __m256i __B) {
  return (__m256i)__builtin_ia32_porq256_mask(
      (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m256i _mm256_or_epi64(__m256i __A, __m256i __B) {
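  /* The unmasked logical ops are expressed as GNU C vector operators rather
     than builtins, so they can fold like ordinary integer code. */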
return (__m256i)((__v4du)__A | (__v4du)__B); } __funline __m128i _mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_porq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_porq128_mask( (__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_or_epi64(__m128i __A, __m128i __B) { return (__m128i)((__v2du)__A | (__v2du)__B); } __funline __m256i _mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pxorq256_mask((__v4di)__A, (__v4di)__B, (__v4di)__W, (__mmask8)__U); } __funline __m256i _mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pxorq256_mask( (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } __funline __m256i _mm256_xor_epi64(__m256i __A, __m256i __B) { return (__m256i)((__v4du)__A ^ (__v4du)__B); } __funline __m128i _mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pxorq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__W, (__mmask8)__U); } __funline __m128i _mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_pxorq128_mask( (__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m128i _mm_xor_epi64(__m128i __A, __m128i __B) { return (__m128i)((__v2du)__A ^ (__v2du)__B); } __funline __m256d _mm256_mask_max_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_maxpd256_mask((__v4df)__A, (__v4df)__B, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_max_pd(__mmask8 __U, __m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_maxpd256_mask( (__v4df)__A, (__v4df)__B, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m256 _mm256_mask_max_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { return (__m256)__builtin_ia32_maxps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_max_ps(__mmask8 __U, __m256 __A, __m256 __B) { return (__m256)__builtin_ia32_maxps256_mask( (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_divps_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_divps_mask( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m128d _mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_divpd_mask((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_divpd_mask( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m256d _mm256_mask_min_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_minpd256_mask((__v4df)__A, (__v4df)__B, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_divpd256_mask((__v4df)__A, (__v4df)__B, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_min_pd(__mmask8 __U, __m256d __A, __m256d __B) { return 
(__m256d)__builtin_ia32_minpd256_mask( (__v4df)__A, (__v4df)__B, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m256 _mm256_mask_min_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { return (__m256)__builtin_ia32_minps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_divpd256_mask( (__v4df)__A, (__v4df)__B, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m256 _mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { return (__m256)__builtin_ia32_divps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B) { return (__m256)__builtin_ia32_minps256_mask( (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m256 _mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B) { return (__m256)__builtin_ia32_divps256_mask( (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_mask_min_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_minps_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_mulps_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_min_ps(__mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_minps_mask( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_mulps_mask( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m128 _mm_mask_max_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_maxps_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U); } __funline __m128 _mm_maskz_max_ps(__mmask8 __U, __m128 __A, __m128 __B) { return (__m128)__builtin_ia32_maxps_mask( (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); } __funline __m128d _mm_mask_min_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_minpd_mask((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_min_pd(__mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_minpd_mask( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m128d _mm_mask_max_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_maxpd_mask((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_max_pd(__mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_maxpd_mask( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m128d _mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_mulpd_mask((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U); } __funline __m128d _mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B) { return (__m128d)__builtin_ia32_mulpd_mask( (__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U); } __funline __m256 _mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { return (__m256)__builtin_ia32_mulps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__W, (__mmask8)__U); } __funline __m256 _mm256_maskz_mul_ps(__mmask8 
__U, __m256 __A, __m256 __B) { return (__m256)__builtin_ia32_mulps256_mask( (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); } __funline __m256d _mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_mulpd256_mask((__v4df)__A, (__v4df)__B, (__v4df)__W, (__mmask8)__U); } __funline __m256d _mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B) { return (__m256d)__builtin_ia32_mulpd256_mask( (__v4df)__A, (__v4df)__B, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); } __funline __m256i _mm256_maskz_max_epi64(__mmask8 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxsq256_mask( (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), __M); } __funline __m256i _mm256_mask_max_epi64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxsq256_mask((__v4di)__A, (__v4di)__B, (__v4di)__W, __M); } __funline __m256i _mm256_min_epi64(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminsq256_mask( (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)-1); } __funline __m256i _mm256_mask_min_epi64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminsq256_mask((__v4di)__A, (__v4di)__B, (__v4di)__W, __M); } __funline __m256i _mm256_maskz_min_epi64(__mmask8 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminsq256_mask( (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), __M); } __funline __m256i _mm256_maskz_max_epu64(__mmask8 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxuq256_mask( (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), __M); } __funline __m256i _mm256_max_epi64(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxsq256_mask( (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)-1); } __funline __m256i _mm256_max_epu64(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxuq256_mask( (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)-1); } __funline __m256i _mm256_mask_max_epu64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxuq256_mask((__v4di)__A, (__v4di)__B, (__v4di)__W, __M); } __funline __m256i _mm256_min_epu64(__m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminuq256_mask( (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)-1); } __funline __m256i _mm256_mask_min_epu64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminuq256_mask((__v4di)__A, (__v4di)__B, (__v4di)__W, __M); } __funline __m256i _mm256_maskz_min_epu64(__mmask8 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminuq256_mask( (__v4di)__A, (__v4di)__B, (__v4di)_mm256_setzero_si256(), __M); } __funline __m256i _mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxsd256_mask( (__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), __M); } __funline __m256i _mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pmaxsd256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, __M); } __funline __m256i _mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminsd256_mask( (__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), __M); } __funline __m256i _mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { return (__m256i)__builtin_ia32_pminsd256_mask((__v8si)__A, 
__funline __m256i _mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmaxsd256_mask((__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), __M);
}

__funline __m256i _mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmaxsd256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, __M);
}

__funline __m256i _mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pminsd256_mask((__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), __M);
}

__funline __m256i _mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pminsd256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, __M);
}

__funline __m256i _mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmaxud256_mask((__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), __M);
}

__funline __m256i _mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmaxud256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, __M);
}

__funline __m256i _mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pminud256_mask((__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), __M);
}

__funline __m256i _mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pminud256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, __M);
}

__funline __m128i _mm_maskz_max_epi64(__mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pmaxsq128_mask((__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), __M);
}

__funline __m128i _mm_mask_max_epi64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pmaxsq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__W, __M);
}

__funline __m128i _mm_min_epi64(__m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pminsq128_mask((__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm_mask_min_epi64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pminsq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__W, __M);
}

__funline __m128i _mm_maskz_min_epi64(__mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pminsq128_mask((__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), __M);
}

__funline __m128i _mm_maskz_max_epu64(__mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pmaxuq128_mask((__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), __M);
}

__funline __m128i _mm_max_epi64(__m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pmaxsq128_mask((__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm_max_epu64(__m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pmaxuq128_mask((__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm_mask_max_epu64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pmaxuq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__W, __M);
}

__funline __m128i _mm_min_epu64(__m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pminuq128_mask((__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm_mask_min_epu64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pminuq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__W, __M);
}

__funline __m128i _mm_maskz_min_epu64(__mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pminuq128_mask((__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), __M);
}
__funline __m128i _mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pmaxsd128_mask((__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), __M);
}

__funline __m128i _mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pmaxsd128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, __M);
}

__funline __m128i _mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pminsd128_mask((__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), __M);
}

__funline __m128i _mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pminsd128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, __M);
}

__funline __m128i _mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pmaxud128_mask((__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), __M);
}

__funline __m128i _mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pmaxud128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, __M);
}

__funline __m128i _mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pminud128_mask((__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), __M);
}

__funline __m128i _mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pminud128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, __M);
}
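/* The next block also needs AVX512CD, so it temporarily widens the target
   to "avx512vl,avx512cd" when the translation unit was not already built
   with it.  VPBROADCASTM broadcasts a mask register into vector elements,
   VPLZCNT counts leading zero bits per element, and VPCONFLICT gives each
   element a bitmask of earlier elements holding the same value, the usual
   building block for detecting scatter collisions.  */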
#ifndef __AVX512CD__
#pragma GCC push_options
#pragma GCC target("avx512vl,avx512cd")
#define __DISABLE_AVX512VLCD__
#endif

__funline __m128i _mm_broadcastmb_epi64(__mmask8 __A) {
  return (__m128i)__builtin_ia32_broadcastmb128(__A);
}

__funline __m256i _mm256_broadcastmb_epi64(__mmask8 __A) {
  return (__m256i)__builtin_ia32_broadcastmb256(__A);
}

__funline __m128i _mm_broadcastmw_epi32(__mmask16 __A) {
  return (__m128i)__builtin_ia32_broadcastmw128(__A);
}

__funline __m256i _mm256_broadcastmw_epi32(__mmask16 __A) {
  return (__m256i)__builtin_ia32_broadcastmw256(__A);
}

__funline __m256i _mm256_lzcnt_epi32(__m256i __A) {
  return (__m256i)__builtin_ia32_vplzcntd_256_mask((__v8si)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_lzcnt_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
  return (__m256i)__builtin_ia32_vplzcntd_256_mask((__v8si)__A, (__v8si)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_lzcnt_epi32(__mmask8 __U, __m256i __A) {
  return (__m256i)__builtin_ia32_vplzcntd_256_mask((__v8si)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m256i _mm256_lzcnt_epi64(__m256i __A) {
  return (__m256i)__builtin_ia32_vplzcntq_256_mask((__v4di)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_lzcnt_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
  return (__m256i)__builtin_ia32_vplzcntq_256_mask((__v4di)__A, (__v4di)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_lzcnt_epi64(__mmask8 __U, __m256i __A) {
  return (__m256i)__builtin_ia32_vplzcntq_256_mask((__v4di)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m256i _mm256_conflict_epi64(__m256i __A) {
  return (__m256i)__builtin_ia32_vpconflictdi_256_mask((__v4di)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_conflict_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
  return (__m256i)__builtin_ia32_vpconflictdi_256_mask((__v4di)__A, (__v4di)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_conflict_epi64(__mmask8 __U, __m256i __A) {
  return (__m256i)__builtin_ia32_vpconflictdi_256_mask((__v4di)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m256i _mm256_conflict_epi32(__m256i __A) {
  return (__m256i)__builtin_ia32_vpconflictsi_256_mask((__v8si)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_conflict_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
  return (__m256i)__builtin_ia32_vpconflictsi_256_mask((__v8si)__A, (__v8si)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_conflict_epi32(__mmask8 __U, __m256i __A) {
  return (__m256i)__builtin_ia32_vpconflictsi_256_mask((__v8si)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m128i _mm_lzcnt_epi32(__m128i __A) {
  return (__m128i)__builtin_ia32_vplzcntd_128_mask((__v4si)__A, (__v4si)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm_mask_lzcnt_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
  return (__m128i)__builtin_ia32_vplzcntd_128_mask((__v4si)__A, (__v4si)__W, (__mmask8)__U);
}

__funline __m128i _mm_maskz_lzcnt_epi32(__mmask8 __U, __m128i __A) {
  return (__m128i)__builtin_ia32_vplzcntd_128_mask((__v4si)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m128i _mm_lzcnt_epi64(__m128i __A) {
  return (__m128i)__builtin_ia32_vplzcntq_128_mask((__v2di)__A, (__v2di)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm_mask_lzcnt_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
  return (__m128i)__builtin_ia32_vplzcntq_128_mask((__v2di)__A, (__v2di)__W, (__mmask8)__U);
}

__funline __m128i _mm_maskz_lzcnt_epi64(__mmask8 __U, __m128i __A) {
  return (__m128i)__builtin_ia32_vplzcntq_128_mask((__v2di)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m128i _mm_conflict_epi64(__m128i __A) {
  return (__m128i)__builtin_ia32_vpconflictdi_128_mask((__v2di)__A, (__v2di)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm_mask_conflict_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
  return (__m128i)__builtin_ia32_vpconflictdi_128_mask((__v2di)__A, (__v2di)__W, (__mmask8)__U);
}

__funline __m128i _mm_maskz_conflict_epi64(__mmask8 __U, __m128i __A) {
  return (__m128i)__builtin_ia32_vpconflictdi_128_mask((__v2di)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m128i _mm_conflict_epi32(__m128i __A) {
  return (__m128i)__builtin_ia32_vpconflictsi_128_mask((__v4si)__A, (__v4si)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm_mask_conflict_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
  return (__m128i)__builtin_ia32_vpconflictsi_128_mask((__v4si)__A, (__v4si)__W, (__mmask8)__U);
}

__funline __m128i _mm_maskz_conflict_epi32(__mmask8 __U, __m128i __A) {
  return (__m128i)__builtin_ia32_vpconflictsi_128_mask((__v4si)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
}

#ifdef __DISABLE_AVX512VLCD__
#pragma GCC pop_options
#endif
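/* Masked forms of the classic unpcklpd/unpckhpd/unpcklps/unpckhps
   interleaves, plus masked VCVTPH2PS half-to-single conversions.  */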
__funline __m256d _mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
  return (__m256d)__builtin_ia32_unpcklpd256_mask((__v4df)__A, (__v4df)__B, (__v4df)__W, (__mmask8)__U);
}

__funline __m256d _mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B) {
  return (__m256d)__builtin_ia32_unpcklpd256_mask((__v4df)__A, (__v4df)__B, (__v4df)_mm256_setzero_pd(), (__mmask8)__U);
}

__funline __m128d _mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
  return (__m128d)__builtin_ia32_unpcklpd128_mask((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U);
}

__funline __m128d _mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B) {
  return (__m128d)__builtin_ia32_unpcklpd128_mask((__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U);
}

__funline __m256 _mm256_mask_unpacklo_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
  return (__m256)__builtin_ia32_unpcklps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__W, (__mmask8)__U);
}

__funline __m256d _mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
  return (__m256d)__builtin_ia32_unpckhpd256_mask((__v4df)__A, (__v4df)__B, (__v4df)__W, (__mmask8)__U);
}

__funline __m256d _mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B) {
  return (__m256d)__builtin_ia32_unpckhpd256_mask((__v4df)__A, (__v4df)__B, (__v4df)_mm256_setzero_pd(), (__mmask8)__U);
}

__funline __m128d _mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
  return (__m128d)__builtin_ia32_unpckhpd128_mask((__v2df)__A, (__v2df)__B, (__v2df)__W, (__mmask8)__U);
}

__funline __m128d _mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B) {
  return (__m128d)__builtin_ia32_unpckhpd128_mask((__v2df)__A, (__v2df)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U);
}

__funline __m256 _mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
  return (__m256)__builtin_ia32_unpckhps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__W, (__mmask8)__U);
}

__funline __m256 _mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B) {
  return (__m256)__builtin_ia32_unpckhps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U);
}

__funline __m128 _mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128)__builtin_ia32_unpckhps128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U);
}

__funline __m128 _mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128)__builtin_ia32_unpckhps128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
}

__funline __m128 _mm_mask_cvtph_ps(__m128 __W, __mmask8 __U, __m128i __A) {
  return (__m128)__builtin_ia32_vcvtph2ps_mask((__v8hi)__A, (__v4sf)__W, (__mmask8)__U);
}

__funline __m128 _mm_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
  return (__m128)__builtin_ia32_vcvtph2ps_mask((__v8hi)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
}

__funline __m256 _mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B) {
  return (__m256)__builtin_ia32_unpcklps256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U);
}

__funline __m256 _mm256_mask_cvtph_ps(__m256 __W, __mmask8 __U, __m128i __A) {
  return (__m256)__builtin_ia32_vcvtph2ps256_mask((__v8hi)__A, (__v8sf)__W, (__mmask8)__U);
}

__funline __m256 _mm256_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
  return (__m256)__builtin_ia32_vcvtph2ps256_mask((__v8hi)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U);
}

__funline __m128 _mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128)__builtin_ia32_unpcklps128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__W, (__mmask8)__U);
}

__funline __m128 _mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128)__builtin_ia32_unpcklps128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
}
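/* Masked shifts by the count held in the low 64 bits of __B.  VPSRAQ
   (64-bit arithmetic right shift) is another EVEX-only operation with no
   AVX2 counterpart, hence the unmasked _mm256_sra_epi64 and _mm_sra_epi64
   below.  */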
__funline __m256i _mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_psrad256_mask((__v8si)__A, (__v4si)__B, (__v8si)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_psrad256_mask((__v8si)__A, (__v4si)__B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m128i _mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_psrad128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, (__mmask8)__U);
}

__funline __m128i _mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_psrad128_mask((__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m256i _mm256_sra_epi64(__m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_psraq256_mask((__v4di)__A, (__v2di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_psraq256_mask((__v4di)__A, (__v2di)__B, (__v4di)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_psraq256_mask((__v4di)__A, (__v2di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m128i _mm_sra_epi64(__m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_psraq128_mask((__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_psraq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__W, (__mmask8)__U);
}

__funline __m128i _mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_psraq128_mask((__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m128i _mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pslld128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, (__mmask8)__U);
}

__funline __m128i _mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pslld128_mask((__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m128i _mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_psllq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__W, (__mmask8)__U);
}

__funline __m128i _mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_psllq128_mask((__v2di)__A, (__v2di)__B, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m256i _mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_pslld256_mask((__v8si)__A, (__v4si)__B, (__v8si)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_pslld256_mask((__v8si)__A, (__v4si)__B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m256i _mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_psllq256_mask((__v4di)__A, (__v2di)__B, (__v4di)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B) {
  return (__m256i)__builtin_ia32_psllq256_mask((__v4di)__A, (__v2di)__B, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}
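/* Permutes: the permutexvar family selects elements across the full vector
   using per-element indices in __X, while permutevar (VPERMILPS/VPERMILPD)
   only rearranges within each 128-bit lane.  Illustrative use, reversing
   four doubles (the index vector is part of the example, not of this
   header):

     __m256d r = _mm256_permutexvar_pd(_mm256_set_epi64x(0, 1, 2, 3), v);
*/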
__funline __m256 _mm256_mask_permutexvar_ps(__m256 __W, __mmask8 __U, __m256i __X, __m256 __Y) {
  return (__m256)__builtin_ia32_permvarsf256_mask((__v8sf)__Y, (__v8si)__X, (__v8sf)__W, (__mmask8)__U);
}

__funline __m256 _mm256_maskz_permutexvar_ps(__mmask8 __U, __m256i __X, __m256 __Y) {
  return (__m256)__builtin_ia32_permvarsf256_mask((__v8sf)__Y, (__v8si)__X, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U);
}

__funline __m256d _mm256_permutexvar_pd(__m256i __X, __m256d __Y) {
  return (__m256d)__builtin_ia32_permvardf256_mask((__v4df)__Y, (__v4di)__X, (__v4df)_mm256_setzero_pd(), (__mmask8)-1);
}

__funline __m256d _mm256_mask_permutexvar_pd(__m256d __W, __mmask8 __U, __m256i __X, __m256d __Y) {
  return (__m256d)__builtin_ia32_permvardf256_mask((__v4df)__Y, (__v4di)__X, (__v4df)__W, (__mmask8)__U);
}

__funline __m256d _mm256_maskz_permutexvar_pd(__mmask8 __U, __m256i __X, __m256d __Y) {
  return (__m256d)__builtin_ia32_permvardf256_mask((__v4df)__Y, (__v4di)__X, (__v4df)_mm256_setzero_pd(), (__mmask8)__U);
}

__funline __m256d _mm256_mask_permutevar_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256i __C) {
  return (__m256d)__builtin_ia32_vpermilvarpd256_mask((__v4df)__A, (__v4di)__C, (__v4df)__W, (__mmask8)__U);
}

__funline __m256d _mm256_maskz_permutevar_pd(__mmask8 __U, __m256d __A, __m256i __C) {
  return (__m256d)__builtin_ia32_vpermilvarpd256_mask((__v4df)__A, (__v4di)__C, (__v4df)_mm256_setzero_pd(), (__mmask8)__U);
}

__funline __m256 _mm256_mask_permutevar_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256i __C) {
  return (__m256)__builtin_ia32_vpermilvarps256_mask((__v8sf)__A, (__v8si)__C, (__v8sf)__W, (__mmask8)__U);
}

__funline __m256 _mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C) {
  return (__m256)__builtin_ia32_vpermilvarps256_mask((__v8sf)__A, (__v8si)__C, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U);
}

__funline __m128d _mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C) {
  return (__m128d)__builtin_ia32_vpermilvarpd_mask((__v2df)__A, (__v2di)__C, (__v2df)__W, (__mmask8)__U);
}

__funline __m128d _mm_maskz_permutevar_pd(__mmask8 __U, __m128d __A, __m128i __C) {
  return (__m128d)__builtin_ia32_vpermilvarpd_mask((__v2df)__A, (__v2di)__C, (__v2df)_mm_setzero_pd(), (__mmask8)__U);
}

__funline __m128 _mm_mask_permutevar_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128i __C) {
  return (__m128)__builtin_ia32_vpermilvarps_mask((__v4sf)__A, (__v4si)__C, (__v4sf)__W, (__mmask8)__U);
}

__funline __m128 _mm_maskz_permutevar_ps(__mmask8 __U, __m128 __A, __m128i __C) {
  return (__m128)__builtin_ia32_vpermilvarps_mask((__v4sf)__A, (__v4si)__C, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
}

__funline __m256i _mm256_maskz_mullo_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmulld256_mask((__v8si)__A, (__v8si)__B, (__v8si)_mm256_setzero_si256(), __M);
}

__funline __m256i _mm256_maskz_permutexvar_epi64(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_permvardi256_mask((__v4di)__Y, (__v4di)__X, (__v4di)_mm256_setzero_si256(), __M);
}

__funline __m256i _mm256_mask_mullo_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_pmulld256_mask((__v8si)__A, (__v8si)__B, (__v8si)__W, __M);
}

__funline __m128i _mm_maskz_mullo_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pmulld128_mask((__v4si)__A, (__v4si)__B, (__v4si)_mm_setzero_si128(), __M);
}

__funline __m128i _mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_pmulld128_mask((__v4si)__A, (__v4si)__B, (__v4si)__W, __M);
}

__funline __m256i _mm256_mask_mul_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_pmuldq256_mask((__v8si)__X, (__v8si)__Y, (__v4di)__W, __M);
}

__funline __m256i _mm256_maskz_mul_epi32(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_pmuldq256_mask((__v8si)__X, (__v8si)__Y, (__v4di)_mm256_setzero_si256(), __M);
}

__funline __m128i _mm_mask_mul_epi32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_pmuldq128_mask((__v4si)__X, (__v4si)__Y, (__v2di)__W, __M);
}

__funline __m128i _mm_maskz_mul_epi32(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_pmuldq128_mask((__v4si)__X, (__v4si)__Y, (__v2di)_mm_setzero_si128(), __M);
}

__funline __m256i _mm256_permutexvar_epi64(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_permvardi256_mask((__v4di)__Y, (__v4di)__X, (__v4di)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_permutexvar_epi64(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_permvardi256_mask((__v4di)__Y, (__v4di)__X, (__v4di)__W, __M);
}

__funline __m256i _mm256_mask_mul_epu32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_pmuludq256_mask((__v8si)__X, (__v8si)__Y, (__v4di)__W, __M);
}

__funline __m256i _mm256_maskz_permutexvar_epi32(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_permvarsi256_mask((__v8si)__Y, (__v8si)__X, (__v8si)_mm256_setzero_si256(), __M);
}

__funline __m256i _mm256_maskz_mul_epu32(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_pmuludq256_mask((__v8si)__X, (__v8si)__Y, (__v4di)_mm256_setzero_si256(), __M);
}

__funline __m128i _mm_mask_mul_epu32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_pmuludq128_mask((__v4si)__X, (__v4si)__Y, (__v2di)__W, __M);
}

__funline __m128i _mm_maskz_mul_epu32(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__m128i)__builtin_ia32_pmuludq128_mask((__v4si)__X, (__v4si)__Y, (__v2di)_mm_setzero_si128(), __M);
}

__funline __m256i _mm256_permutexvar_epi32(__m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_permvarsi256_mask((__v8si)__Y, (__v8si)__X, (__v8si)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_permutexvar_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) {
  return (__m256i)__builtin_ia32_permvarsi256_mask((__v8si)__Y, (__v8si)__X, (__v8si)__W, __M);
}
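/* Integer comparisons into mask registers.  The small immediate passed to
   the cmp/ucmp builtins is the EVEX comparison predicate: 0 = EQ, 1 = LT,
   2 = LE, 3 = FALSE, 4 = NE, 5 = NLT (>=), 6 = NLE (>), 7 = TRUE.  The
   ucmp* builtins compare unsigned, cmp* signed.  */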
__funline __mmask8 _mm256_mask_cmpneq_epu32_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__X, (__v8si)__Y, 4, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmpneq_epu32_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__X, (__v8si)__Y, 4, (__mmask8)-1);
}

__funline __mmask8 _mm256_mask_cmplt_epu32_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__X, (__v8si)__Y, 1, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmplt_epu32_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__X, (__v8si)__Y, 1, (__mmask8)-1);
}

__funline __mmask8 _mm256_mask_cmpge_epu32_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__X, (__v8si)__Y, 5, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmpge_epu32_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__X, (__v8si)__Y, 5, (__mmask8)-1);
}

__funline __mmask8 _mm256_mask_cmple_epu32_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__X, (__v8si)__Y, 2, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmple_epu32_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__X, (__v8si)__Y, 2, (__mmask8)-1);
}

__funline __mmask8 _mm256_mask_cmpneq_epu64_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__X, (__v4di)__Y, 4, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmpneq_epu64_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__X, (__v4di)__Y, 4, (__mmask8)-1);
}

__funline __mmask8 _mm256_mask_cmplt_epu64_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__X, (__v4di)__Y, 1, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmplt_epu64_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__X, (__v4di)__Y, 1, (__mmask8)-1);
}

__funline __mmask8 _mm256_mask_cmpge_epu64_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__X, (__v4di)__Y, 5, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmpge_epu64_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__X, (__v4di)__Y, 5, (__mmask8)-1);
}

__funline __mmask8 _mm256_mask_cmple_epu64_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__X, (__v4di)__Y, 2, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmple_epu64_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__X, (__v4di)__Y, 2, (__mmask8)-1);
}
__funline __mmask8 _mm256_mask_cmpneq_epi32_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__X, (__v8si)__Y, 4, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmpneq_epi32_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__X, (__v8si)__Y, 4, (__mmask8)-1);
}

__funline __mmask8 _mm256_mask_cmplt_epi32_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__X, (__v8si)__Y, 1, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmplt_epi32_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__X, (__v8si)__Y, 1, (__mmask8)-1);
}

__funline __mmask8 _mm256_mask_cmpge_epi32_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__X, (__v8si)__Y, 5, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmpge_epi32_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__X, (__v8si)__Y, 5, (__mmask8)-1);
}

__funline __mmask8 _mm256_mask_cmple_epi32_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__X, (__v8si)__Y, 2, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmple_epi32_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__X, (__v8si)__Y, 2, (__mmask8)-1);
}

__funline __mmask8 _mm256_mask_cmpneq_epi64_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__X, (__v4di)__Y, 4, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmpneq_epi64_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__X, (__v4di)__Y, 4, (__mmask8)-1);
}

__funline __mmask8 _mm256_mask_cmplt_epi64_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__X, (__v4di)__Y, 1, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmplt_epi64_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__X, (__v4di)__Y, 1, (__mmask8)-1);
}

__funline __mmask8 _mm256_mask_cmpge_epi64_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__X, (__v4di)__Y, 5, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmpge_epi64_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__X, (__v4di)__Y, 5, (__mmask8)-1);
}

__funline __mmask8 _mm256_mask_cmple_epi64_mask(__mmask8 __M, __m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__X, (__v4di)__Y, 2, (__mmask8)__M);
}

__funline __mmask8 _mm256_cmple_epi64_mask(__m256i __X, __m256i __Y) {
  return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__X, (__v4di)__Y, 2, (__mmask8)-1);
}
__funline __mmask8 _mm_mask_cmpneq_epu32_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__X, (__v4si)__Y, 4, (__mmask8)__M);
}

__funline __mmask8 _mm_cmpneq_epu32_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__X, (__v4si)__Y, 4, (__mmask8)-1);
}

__funline __mmask8 _mm_mask_cmplt_epu32_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__X, (__v4si)__Y, 1, (__mmask8)__M);
}

__funline __mmask8 _mm_cmplt_epu32_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__X, (__v4si)__Y, 1, (__mmask8)-1);
}

__funline __mmask8 _mm_mask_cmpge_epu32_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__X, (__v4si)__Y, 5, (__mmask8)__M);
}

__funline __mmask8 _mm_cmpge_epu32_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__X, (__v4si)__Y, 5, (__mmask8)-1);
}

__funline __mmask8 _mm_mask_cmple_epu32_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__X, (__v4si)__Y, 2, (__mmask8)__M);
}

__funline __mmask8 _mm_cmple_epu32_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__X, (__v4si)__Y, 2, (__mmask8)-1);
}

__funline __mmask8 _mm_mask_cmpneq_epu64_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__X, (__v2di)__Y, 4, (__mmask8)__M);
}

__funline __mmask8 _mm_cmpneq_epu64_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__X, (__v2di)__Y, 4, (__mmask8)-1);
}

__funline __mmask8 _mm_mask_cmplt_epu64_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__X, (__v2di)__Y, 1, (__mmask8)__M);
}

__funline __mmask8 _mm_cmplt_epu64_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__X, (__v2di)__Y, 1, (__mmask8)-1);
}

__funline __mmask8 _mm_mask_cmpge_epu64_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__X, (__v2di)__Y, 5, (__mmask8)__M);
}

__funline __mmask8 _mm_cmpge_epu64_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__X, (__v2di)__Y, 5, (__mmask8)-1);
}

__funline __mmask8 _mm_mask_cmple_epu64_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__X, (__v2di)__Y, 2, (__mmask8)__M);
}

__funline __mmask8 _mm_cmple_epu64_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__X, (__v2di)__Y, 2, (__mmask8)-1);
}
__funline __mmask8 _mm_mask_cmpneq_epi32_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__X, (__v4si)__Y, 4, (__mmask8)__M);
}

__funline __mmask8 _mm_cmpneq_epi32_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__X, (__v4si)__Y, 4, (__mmask8)-1);
}

__funline __mmask8 _mm_mask_cmplt_epi32_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__X, (__v4si)__Y, 1, (__mmask8)__M);
}

__funline __mmask8 _mm_cmplt_epi32_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__X, (__v4si)__Y, 1, (__mmask8)-1);
}

__funline __mmask8 _mm_mask_cmpge_epi32_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__X, (__v4si)__Y, 5, (__mmask8)__M);
}

__funline __mmask8 _mm_cmpge_epi32_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__X, (__v4si)__Y, 5, (__mmask8)-1);
}

__funline __mmask8 _mm_mask_cmple_epi32_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__X, (__v4si)__Y, 2, (__mmask8)__M);
}

__funline __mmask8 _mm_cmple_epi32_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__X, (__v4si)__Y, 2, (__mmask8)-1);
}

__funline __mmask8 _mm_mask_cmpneq_epi64_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__X, (__v2di)__Y, 4, (__mmask8)__M);
}

__funline __mmask8 _mm_cmpneq_epi64_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__X, (__v2di)__Y, 4, (__mmask8)-1);
}

__funline __mmask8 _mm_mask_cmplt_epi64_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__X, (__v2di)__Y, 1, (__mmask8)__M);
}

__funline __mmask8 _mm_cmplt_epi64_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__X, (__v2di)__Y, 1, (__mmask8)-1);
}

__funline __mmask8 _mm_mask_cmpge_epi64_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__X, (__v2di)__Y, 5, (__mmask8)__M);
}

__funline __mmask8 _mm_cmpge_epi64_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__X, (__v2di)__Y, 5, (__mmask8)-1);
}

__funline __mmask8 _mm_mask_cmple_epi64_mask(__mmask8 __M, __m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__X, (__v2di)__Y, 2, (__mmask8)__M);
}

__funline __mmask8 _mm_cmple_epi64_mask(__m128i __X, __m128i __Y) {
  return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__X, (__v2di)__Y, 2, (__mmask8)-1);
}
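/* Everything below takes an immediate operand, which the inline-function
   form can only satisfy when the compiler folds the argument to a
   constant; these definitions are therefore guarded by __OPTIMIZE__.  */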
#ifdef __OPTIMIZE__
__funline __m256i _mm256_permutex_epi64(__m256i __X, const int __I) {
  return (__m256i)__builtin_ia32_permdi256_mask((__v4di)__X, __I, (__v4di)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_permutex_epi64(__m256i __W, __mmask8 __M, __m256i __X, const int __I) {
  return (__m256i)__builtin_ia32_permdi256_mask((__v4di)__X, __I, (__v4di)__W, (__mmask8)__M);
}

__funline __m256i _mm256_maskz_permutex_epi64(__mmask8 __M, __m256i __X, const int __I) {
  return (__m256i)__builtin_ia32_permdi256_mask((__v4di)__X, __I, (__v4di)_mm256_setzero_si256(), (__mmask8)__M);
}

__funline __m256d _mm256_mask_shuffle_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B, const int __imm) {
  return (__m256d)__builtin_ia32_shufpd256_mask((__v4df)__A, (__v4df)__B, __imm, (__v4df)__W, (__mmask8)__U);
}

__funline __m256d _mm256_maskz_shuffle_pd(__mmask8 __U, __m256d __A, __m256d __B, const int __imm) {
  return (__m256d)__builtin_ia32_shufpd256_mask((__v4df)__A, (__v4df)__B, __imm, (__v4df)_mm256_setzero_pd(), (__mmask8)__U);
}

__funline __m128d _mm_mask_shuffle_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, const int __imm) {
  return (__m128d)__builtin_ia32_shufpd128_mask((__v2df)__A, (__v2df)__B, __imm, (__v2df)__W, (__mmask8)__U);
}

__funline __m128d _mm_maskz_shuffle_pd(__mmask8 __U, __m128d __A, __m128d __B, const int __imm) {
  return (__m128d)__builtin_ia32_shufpd128_mask((__v2df)__A, (__v2df)__B, __imm, (__v2df)_mm_setzero_pd(), (__mmask8)__U);
}

__funline __m256 _mm256_mask_shuffle_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B, const int __imm) {
  return (__m256)__builtin_ia32_shufps256_mask((__v8sf)__A, (__v8sf)__B, __imm, (__v8sf)__W, (__mmask8)__U);
}

__funline __m256 _mm256_maskz_shuffle_ps(__mmask8 __U, __m256 __A, __m256 __B, const int __imm) {
  return (__m256)__builtin_ia32_shufps256_mask((__v8sf)__A, (__v8sf)__B, __imm, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U);
}

__funline __m128 _mm_mask_shuffle_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, const int __imm) {
  return (__m128)__builtin_ia32_shufps128_mask((__v4sf)__A, (__v4sf)__B, __imm, (__v4sf)__W, (__mmask8)__U);
}

__funline __m128 _mm_maskz_shuffle_ps(__mmask8 __U, __m128 __A, __m128 __B, const int __imm) {
  return (__m128)__builtin_ia32_shufps128_mask((__v4sf)__A, (__v4sf)__B, __imm, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
}

__funline __m256i _mm256_inserti32x4(__m256i __A, __m128i __B, const int __imm) {
  return (__m256i)__builtin_ia32_inserti32x4_256_mask((__v8si)__A, (__v4si)__B, __imm, (__v8si)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_inserti32x4(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B, const int __imm) {
  return (__m256i)__builtin_ia32_inserti32x4_256_mask((__v8si)__A, (__v4si)__B, __imm, (__v8si)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_inserti32x4(__mmask8 __U, __m256i __A, __m128i __B, const int __imm) {
  return (__m256i)__builtin_ia32_inserti32x4_256_mask((__v8si)__A, (__v4si)__B, __imm, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m256 _mm256_insertf32x4(__m256 __A, __m128 __B, const int __imm) {
  return (__m256)__builtin_ia32_insertf32x4_256_mask((__v8sf)__A, (__v4sf)__B, __imm, (__v8sf)_mm256_setzero_ps(), (__mmask8)-1);
}

__funline __m256 _mm256_mask_insertf32x4(__m256 __W, __mmask8 __U, __m256 __A, __m128 __B, const int __imm) {
  return (__m256)__builtin_ia32_insertf32x4_256_mask((__v8sf)__A, (__v4sf)__B, __imm, (__v8sf)__W, (__mmask8)__U);
}

__funline __m256 _mm256_maskz_insertf32x4(__mmask8 __U, __m256 __A, __m128 __B, const int __imm) {
  return (__m256)__builtin_ia32_insertf32x4_256_mask((__v8sf)__A, (__v4sf)__B, __imm, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U);
}

__funline __m128i _mm256_extracti32x4_epi32(__m256i __A, const int __imm) {
  return (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)__A, __imm, (__v4si)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm256_mask_extracti32x4_epi32(__m128i __W, __mmask8 __U, __m256i __A, const int __imm) {
  return (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)__A, __imm, (__v4si)__W, (__mmask8)__U);
}

__funline __m128i _mm256_maskz_extracti32x4_epi32(__mmask8 __U, __m256i __A, const int __imm) {
  return (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)__A, __imm, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m128 _mm256_extractf32x4_ps(__m256 __A, const int __imm) {
  return (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)__A, __imm, (__v4sf)_mm_setzero_ps(), (__mmask8)-1);
}

__funline __m128 _mm256_mask_extractf32x4_ps(__m128 __W, __mmask8 __U, __m256 __A, const int __imm) {
  return (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)__A, __imm, (__v4sf)__W, (__mmask8)__U);
}

__funline __m128 _mm256_maskz_extractf32x4_ps(__mmask8 __U, __m256 __A, const int __imm) {
  return (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)__A, __imm, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
}
__funline __m256i _mm256_shuffle_i64x2(__m256i __A, __m256i __B, const int __imm) {
  return (__m256i)__builtin_ia32_shuf_i64x2_256_mask((__v4di)__A, (__v4di)__B, __imm, (__v4di)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_shuffle_i64x2(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B, const int __imm) {
  return (__m256i)__builtin_ia32_shuf_i64x2_256_mask((__v4di)__A, (__v4di)__B, __imm, (__v4di)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_shuffle_i64x2(__mmask8 __U, __m256i __A, __m256i __B, const int __imm) {
  return (__m256i)__builtin_ia32_shuf_i64x2_256_mask((__v4di)__A, (__v4di)__B, __imm, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m256i _mm256_shuffle_i32x4(__m256i __A, __m256i __B, const int __imm) {
  return (__m256i)__builtin_ia32_shuf_i32x4_256_mask((__v8si)__A, (__v8si)__B, __imm, (__v8si)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_shuffle_i32x4(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B, const int __imm) {
  return (__m256i)__builtin_ia32_shuf_i32x4_256_mask((__v8si)__A, (__v8si)__B, __imm, (__v8si)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_shuffle_i32x4(__mmask8 __U, __m256i __A, __m256i __B, const int __imm) {
  return (__m256i)__builtin_ia32_shuf_i32x4_256_mask((__v8si)__A, (__v8si)__B, __imm, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m256d _mm256_shuffle_f64x2(__m256d __A, __m256d __B, const int __imm) {
  return (__m256d)__builtin_ia32_shuf_f64x2_256_mask((__v4df)__A, (__v4df)__B, __imm, (__v4df)_mm256_setzero_pd(), (__mmask8)-1);
}

__funline __m256d _mm256_mask_shuffle_f64x2(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B, const int __imm) {
  return (__m256d)__builtin_ia32_shuf_f64x2_256_mask((__v4df)__A, (__v4df)__B, __imm, (__v4df)__W, (__mmask8)__U);
}

__funline __m256d _mm256_maskz_shuffle_f64x2(__mmask8 __U, __m256d __A, __m256d __B, const int __imm) {
  return (__m256d)__builtin_ia32_shuf_f64x2_256_mask((__v4df)__A, (__v4df)__B, __imm, (__v4df)_mm256_setzero_pd(), (__mmask8)__U);
}

__funline __m256 _mm256_shuffle_f32x4(__m256 __A, __m256 __B, const int __imm) {
  return (__m256)__builtin_ia32_shuf_f32x4_256_mask((__v8sf)__A, (__v8sf)__B, __imm, (__v8sf)_mm256_setzero_ps(), (__mmask8)-1);
}

__funline __m256 _mm256_mask_shuffle_f32x4(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B, const int __imm) {
  return (__m256)__builtin_ia32_shuf_f32x4_256_mask((__v8sf)__A, (__v8sf)__B, __imm, (__v8sf)__W, (__mmask8)__U);
}

__funline __m256 _mm256_maskz_shuffle_f32x4(__mmask8 __U, __m256 __A, __m256 __B, const int __imm) {
  return (__m256)__builtin_ia32_shuf_f32x4_256_mask((__v8sf)__A, (__v8sf)__B, __imm, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U);
}
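/* VFIXUPIMM patches special-case inputs (NaN, +-0, +-Inf, ...): per
   element, a token classifying __B selects a nibble from the matching
   element of __C, which in turn names the replacement value; __imm
   controls which floating-point faults may be raised.  */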
__funline __m256d _mm256_fixupimm_pd(__m256d __A, __m256d __B, __m256i __C, const int __imm) {
  return (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)__A, (__v4df)__B, (__v4di)__C, __imm, (__mmask8)-1);
}

__funline __m256d _mm256_mask_fixupimm_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256i __C, const int __imm) {
  return (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)__A, (__v4df)__B, (__v4di)__C, __imm, (__mmask8)__U);
}

__funline __m256d _mm256_maskz_fixupimm_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256i __C, const int __imm) {
  return (__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)__A, (__v4df)__B, (__v4di)__C, __imm, (__mmask8)__U);
}

__funline __m256 _mm256_fixupimm_ps(__m256 __A, __m256 __B, __m256i __C, const int __imm) {
  return (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)__A, (__v8sf)__B, (__v8si)__C, __imm, (__mmask8)-1);
}

__funline __m256 _mm256_mask_fixupimm_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256i __C, const int __imm) {
  return (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)__A, (__v8sf)__B, (__v8si)__C, __imm, (__mmask8)__U);
}

__funline __m256 _mm256_maskz_fixupimm_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256i __C, const int __imm) {
  return (__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)__A, (__v8sf)__B, (__v8si)__C, __imm, (__mmask8)__U);
}

__funline __m128d _mm_fixupimm_pd(__m128d __A, __m128d __B, __m128i __C, const int __imm) {
  return (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)__A, (__v2df)__B, (__v2di)__C, __imm, (__mmask8)-1);
}

__funline __m128d _mm_mask_fixupimm_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128i __C, const int __imm) {
  return (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)__A, (__v2df)__B, (__v2di)__C, __imm, (__mmask8)__U);
}

__funline __m128d _mm_maskz_fixupimm_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128i __C, const int __imm) {
  return (__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)__A, (__v2df)__B, (__v2di)__C, __imm, (__mmask8)__U);
}

__funline __m128 _mm_fixupimm_ps(__m128 __A, __m128 __B, __m128i __C, const int __imm) {
  return (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)__A, (__v4sf)__B, (__v4si)__C, __imm, (__mmask8)-1);
}

__funline __m128 _mm_mask_fixupimm_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128i __C, const int __imm) {
  return (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)__A, (__v4sf)__B, (__v4si)__C, __imm, (__mmask8)__U);
}

__funline __m128 _mm_maskz_fixupimm_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128i __C, const int __imm) {
  return (__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)__A, (__v4sf)__B, (__v4si)__C, __imm, (__mmask8)__U);
}

__funline __m256i _mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, const int __imm) {
  return (__m256i)__builtin_ia32_psrldi256_mask((__v8si)__A, __imm, (__v8si)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, const int __imm) {
  return (__m256i)__builtin_ia32_psrldi256_mask((__v8si)__A, __imm, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m128i _mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, const int __imm) {
  return (__m128i)__builtin_ia32_psrldi128_mask((__v4si)__A, __imm, (__v4si)__W, (__mmask8)__U);
}

__funline __m128i _mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, const int __imm) {
  return (__m128i)__builtin_ia32_psrldi128_mask((__v4si)__A, __imm, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m256i _mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, const int __imm) {
  return (__m256i)__builtin_ia32_psrlqi256_mask((__v4di)__A, __imm, (__v4di)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, const int __imm) {
  return (__m256i)__builtin_ia32_psrlqi256_mask((__v4di)__A, __imm, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m128i _mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, const int __imm) {
  return (__m128i)__builtin_ia32_psrlqi128_mask((__v2di)__A, __imm, (__v2di)__W, (__mmask8)__U);
}

__funline __m128i _mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, const int __imm) {
  return (__m128i)__builtin_ia32_psrlqi128_mask((__v2di)__A, __imm, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
}
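/* VPTERNLOG evaluates an arbitrary three-input boolean function: for every
   bit position, the bit triple taken from __A/__B/__C indexes into the
   8-bit immediate, so __imm is simply the function's truth table.  For
   example 0x96 computes A ^ B ^ C and 0xE8 is the majority function:

     __m256i x = _mm256_ternarylogic_epi64(a, b, c, 0x96);
*/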
__funline __m256i _mm256_ternarylogic_epi64(__m256i __A, __m256i __B, __m256i __C, const int __imm) {
  return (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)__A, (__v4di)__B, (__v4di)__C, __imm, (__mmask8)-1);
}

__funline __m256i _mm256_mask_ternarylogic_epi64(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C, const int __imm) {
  return (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)__A, (__v4di)__B, (__v4di)__C, __imm, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_ternarylogic_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C, const int __imm) {
  return (__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)__A, (__v4di)__B, (__v4di)__C, __imm, (__mmask8)__U);
}

__funline __m256i _mm256_ternarylogic_epi32(__m256i __A, __m256i __B, __m256i __C, const int __imm) {
  return (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)__A, (__v8si)__B, (__v8si)__C, __imm, (__mmask8)-1);
}

__funline __m256i _mm256_mask_ternarylogic_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C, const int __imm) {
  return (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)__A, (__v8si)__B, (__v8si)__C, __imm, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_ternarylogic_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C, const int __imm) {
  return (__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)__A, (__v8si)__B, (__v8si)__C, __imm, (__mmask8)__U);
}

__funline __m128i _mm_ternarylogic_epi64(__m128i __A, __m128i __B, __m128i __C, const int __imm) {
  return (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__C, __imm, (__mmask8)-1);
}

__funline __m128i _mm_mask_ternarylogic_epi64(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C, const int __imm) {
  return (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)__A, (__v2di)__B, (__v2di)__C, __imm, (__mmask8)__U);
}

__funline __m128i _mm_maskz_ternarylogic_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C, const int __imm) {
  return (__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)__A, (__v2di)__B, (__v2di)__C, __imm, (__mmask8)__U);
}

__funline __m128i _mm_ternarylogic_epi32(__m128i __A, __m128i __B, __m128i __C, const int __imm) {
  return (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)__A, (__v4si)__B, (__v4si)__C, __imm, (__mmask8)-1);
}

__funline __m128i _mm_mask_ternarylogic_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C, const int __imm) {
  return (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)__A, (__v4si)__B, (__v4si)__C, __imm, (__mmask8)__U);
}

__funline __m128i _mm_maskz_ternarylogic_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C, const int __imm) {
  return (__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)__A, (__v4si)__B, (__v4si)__C, __imm, (__mmask8)__U);
}
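/* VRNDSCALE rounds to a given number of fraction bits: imm8[7:4] is the
   number of bits to keep (round to multiples of 2^-M) and the low nibble
   selects the rounding behaviour, e.g. 0x01 rounds toward -inf (a floor
   when M is 0) and 0x02 toward +inf.  */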
__funline __m256 _mm256_roundscale_ps(__m256 __A, const int __imm) {
  return (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)__A, __imm, (__v8sf)_mm256_setzero_ps(), (__mmask8)-1);
}

__funline __m256 _mm256_mask_roundscale_ps(__m256 __W, __mmask8 __U, __m256 __A, const int __imm) {
  return (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)__A, __imm, (__v8sf)__W, (__mmask8)__U);
}

__funline __m256 _mm256_maskz_roundscale_ps(__mmask8 __U, __m256 __A, const int __imm) {
  return (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)__A, __imm, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U);
}

__funline __m256d _mm256_roundscale_pd(__m256d __A, const int __imm) {
  return (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)__A, __imm, (__v4df)_mm256_setzero_pd(), (__mmask8)-1);
}

__funline __m256d _mm256_mask_roundscale_pd(__m256d __W, __mmask8 __U, __m256d __A, const int __imm) {
  return (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)__A, __imm, (__v4df)__W, (__mmask8)__U);
}

__funline __m256d _mm256_maskz_roundscale_pd(__mmask8 __U, __m256d __A, const int __imm) {
  return (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)__A, __imm, (__v4df)_mm256_setzero_pd(), (__mmask8)__U);
}

__funline __m128 _mm_roundscale_ps(__m128 __A, const int __imm) {
  return (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)__A, __imm, (__v4sf)_mm_setzero_ps(), (__mmask8)-1);
}

__funline __m128 _mm_mask_roundscale_ps(__m128 __W, __mmask8 __U, __m128 __A, const int __imm) {
  return (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)__A, __imm, (__v4sf)__W, (__mmask8)__U);
}

__funline __m128 _mm_maskz_roundscale_ps(__mmask8 __U, __m128 __A, const int __imm) {
  return (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)__A, __imm, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
}

__funline __m128d _mm_roundscale_pd(__m128d __A, const int __imm) {
  return (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)__A, __imm, (__v2df)_mm_setzero_pd(), (__mmask8)-1);
}

__funline __m128d _mm_mask_roundscale_pd(__m128d __W, __mmask8 __U, __m128d __A, const int __imm) {
  return (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)__A, __imm, (__v2df)__W, (__mmask8)__U);
}

__funline __m128d _mm_maskz_roundscale_pd(__mmask8 __U, __m128d __A, const int __imm) {
  return (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)__A, __imm, (__v2df)_mm_setzero_pd(), (__mmask8)__U);
}
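/* VGETMANT extracts the mantissa normalised into the interval named by the
   _MM_MANTISSA_NORM_ENUM argument, with sign treatment from
   _MM_MANTISSA_SIGN_ENUM; the two enums are packed into a single immediate
   as (__C << 2) | __B.  */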
__funline __m256 _mm256_getmant_ps(__m256 __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) {
  return (__m256)__builtin_ia32_getmantps256_mask((__v8sf)__A, (__C << 2) | __B, (__v8sf)_mm256_setzero_ps(), (__mmask8)-1);
}

__funline __m256 _mm256_mask_getmant_ps(__m256 __W, __mmask8 __U, __m256 __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) {
  return (__m256)__builtin_ia32_getmantps256_mask((__v8sf)__A, (__C << 2) | __B, (__v8sf)__W, (__mmask8)__U);
}

__funline __m256 _mm256_maskz_getmant_ps(__mmask8 __U, __m256 __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) {
  return (__m256)__builtin_ia32_getmantps256_mask((__v8sf)__A, (__C << 2) | __B, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U);
}

__funline __m128 _mm_getmant_ps(__m128 __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) {
  return (__m128)__builtin_ia32_getmantps128_mask((__v4sf)__A, (__C << 2) | __B, (__v4sf)_mm_setzero_ps(), (__mmask8)-1);
}

__funline __m128 _mm_mask_getmant_ps(__m128 __W, __mmask8 __U, __m128 __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) {
  return (__m128)__builtin_ia32_getmantps128_mask((__v4sf)__A, (__C << 2) | __B, (__v4sf)__W, (__mmask8)__U);
}

__funline __m128 _mm_maskz_getmant_ps(__mmask8 __U, __m128 __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) {
  return (__m128)__builtin_ia32_getmantps128_mask((__v4sf)__A, (__C << 2) | __B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
}

__funline __m256d _mm256_getmant_pd(__m256d __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) {
  return (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)__A, (__C << 2) | __B, (__v4df)_mm256_setzero_pd(), (__mmask8)-1);
}

__funline __m256d _mm256_mask_getmant_pd(__m256d __W, __mmask8 __U, __m256d __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) {
  return (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)__A, (__C << 2) | __B, (__v4df)__W, (__mmask8)__U);
}

__funline __m256d _mm256_maskz_getmant_pd(__mmask8 __U, __m256d __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) {
  return (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)__A, (__C << 2) | __B, (__v4df)_mm256_setzero_pd(), (__mmask8)__U);
}

__funline __m128d _mm_getmant_pd(__m128d __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) {
  return (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)__A, (__C << 2) | __B, (__v2df)_mm_setzero_pd(), (__mmask8)-1);
}

__funline __m128d _mm_mask_getmant_pd(__m128d __W, __mmask8 __U, __m128d __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) {
  return (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)__A, (__C << 2) | __B, (__v2df)__W, (__mmask8)__U);
}

__funline __m128d _mm_maskz_getmant_pd(__mmask8 __U, __m128d __A, _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) {
  return (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)__A, (__C << 2) | __B, (__v2df)_mm_setzero_pd(), (__mmask8)__U);
}
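/* Masked gathers: each element whose mask bit is set is loaded from
   __addr + __index[i] * __scale; elements with a clear bit keep the value
   from __v1_old.  __scale must be 1, 2, 4 or 8.  */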
__funline __m256 _mm256_mmask_i32gather_ps(__m256 __v1_old, __mmask8 __mask, __m256i __index, void const *__addr, int __scale) {
  return (__m256)__builtin_ia32_gather3siv8sf((__v8sf)__v1_old, __addr, (__v8si)__index, __mask, __scale);
}

__funline __m128 _mm_mmask_i32gather_ps(__m128 __v1_old, __mmask8 __mask, __m128i __index, void const *__addr, int __scale) {
  return (__m128)__builtin_ia32_gather3siv4sf((__v4sf)__v1_old, __addr, (__v4si)__index, __mask, __scale);
}

__funline __m256d _mm256_mmask_i32gather_pd(__m256d __v1_old, __mmask8 __mask, __m128i __index, void const *__addr, int __scale) {
  return (__m256d)__builtin_ia32_gather3siv4df((__v4df)__v1_old, __addr, (__v4si)__index, __mask, __scale);
}

__funline __m128d _mm_mmask_i32gather_pd(__m128d __v1_old, __mmask8 __mask, __m128i __index, void const *__addr, int __scale) {
  return (__m128d)__builtin_ia32_gather3siv2df((__v2df)__v1_old, __addr, (__v4si)__index, __mask, __scale);
}

__funline __m128 _mm256_mmask_i64gather_ps(__m128 __v1_old, __mmask8 __mask, __m256i __index, void const *__addr, int __scale) {
  return (__m128)__builtin_ia32_gather3div8sf((__v4sf)__v1_old, __addr, (__v4di)__index, __mask, __scale);
}

__funline __m128 _mm_mmask_i64gather_ps(__m128 __v1_old, __mmask8 __mask, __m128i __index, void const *__addr, int __scale) {
  return (__m128)__builtin_ia32_gather3div4sf((__v4sf)__v1_old, __addr, (__v2di)__index, __mask, __scale);
}

__funline __m256d _mm256_mmask_i64gather_pd(__m256d __v1_old, __mmask8 __mask, __m256i __index, void const *__addr, int __scale) {
  return (__m256d)__builtin_ia32_gather3div4df((__v4df)__v1_old, __addr, (__v4di)__index, __mask, __scale);
}

__funline __m128d _mm_mmask_i64gather_pd(__m128d __v1_old, __mmask8 __mask, __m128i __index, void const *__addr, int __scale) {
  return (__m128d)__builtin_ia32_gather3div2df((__v2df)__v1_old, __addr, (__v2di)__index, __mask, __scale);
}

__funline __m256i _mm256_mmask_i32gather_epi32(__m256i __v1_old, __mmask8 __mask, __m256i __index, void const *__addr, int __scale) {
  return (__m256i)__builtin_ia32_gather3siv8si((__v8si)__v1_old, __addr, (__v8si)__index, __mask, __scale);
}

__funline __m128i _mm_mmask_i32gather_epi32(__m128i __v1_old, __mmask8 __mask, __m128i __index, void const *__addr, int __scale) {
  return (__m128i)__builtin_ia32_gather3siv4si((__v4si)__v1_old, __addr, (__v4si)__index, __mask, __scale);
}

__funline __m256i _mm256_mmask_i32gather_epi64(__m256i __v1_old, __mmask8 __mask, __m128i __index, void const *__addr, int __scale) {
  return (__m256i)__builtin_ia32_gather3siv4di((__v4di)__v1_old, __addr, (__v4si)__index, __mask, __scale);
}

__funline __m128i _mm_mmask_i32gather_epi64(__m128i __v1_old, __mmask8 __mask, __m128i __index, void const *__addr, int __scale) {
  return (__m128i)__builtin_ia32_gather3siv2di((__v2di)__v1_old, __addr, (__v4si)__index, __mask, __scale);
}

__funline __m128i _mm256_mmask_i64gather_epi32(__m128i __v1_old, __mmask8 __mask, __m256i __index, void const *__addr, int __scale) {
  return (__m128i)__builtin_ia32_gather3div8si((__v4si)__v1_old, __addr, (__v4di)__index, __mask, __scale);
}

__funline __m128i _mm_mmask_i64gather_epi32(__m128i __v1_old, __mmask8 __mask, __m128i __index, void const *__addr, int __scale) {
  return (__m128i)__builtin_ia32_gather3div4si((__v4si)__v1_old, __addr, (__v2di)__index, __mask, __scale);
}

__funline __m256i _mm256_mmask_i64gather_epi64(__m256i __v1_old, __mmask8 __mask, __m256i __index, void const *__addr, int __scale) {
  return (__m256i)__builtin_ia32_gather3div4di((__v4di)__v1_old, __addr, (__v4di)__index, __mask, __scale);
}

__funline __m128i _mm_mmask_i64gather_epi64(__m128i __v1_old, __mmask8 __mask, __m128i __index, void const *__addr, int __scale) {
  return (__m128i)__builtin_ia32_gather3div2di((__v2di)__v1_old, __addr, (__v2di)__index, __mask, __scale);
}
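/* Scatters: the unmasked forms just pass an all-ones mask (0xFF).  Stores
   are performed in ascending element order, so when two indices collide
   the element with the higher index wins.  */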
__funline void _mm256_i32scatter_ps(void *__addr, __m256i __index, __m256 __v1,
                                    const int __scale) {
  __builtin_ia32_scattersiv8sf(__addr, (__mmask8)0xFF, (__v8si)__index,
                               (__v8sf)__v1, __scale);
}

__funline void _mm256_mask_i32scatter_ps(void *__addr, __mmask8 __mask,
                                         __m256i __index, __m256 __v1,
                                         const int __scale) {
  __builtin_ia32_scattersiv8sf(__addr, __mask, (__v8si)__index, (__v8sf)__v1,
                               __scale);
}

__funline void _mm_i32scatter_ps(void *__addr, __m128i __index, __m128 __v1,
                                 const int __scale) {
  __builtin_ia32_scattersiv4sf(__addr, (__mmask8)0xFF, (__v4si)__index,
                               (__v4sf)__v1, __scale);
}

__funline void _mm_mask_i32scatter_ps(void *__addr, __mmask8 __mask,
                                      __m128i __index, __m128 __v1,
                                      const int __scale) {
  __builtin_ia32_scattersiv4sf(__addr, __mask, (__v4si)__index, (__v4sf)__v1,
                               __scale);
}

__funline void _mm256_i32scatter_pd(void *__addr, __m128i __index, __m256d __v1,
                                    const int __scale) {
  __builtin_ia32_scattersiv4df(__addr, (__mmask8)0xFF, (__v4si)__index,
                               (__v4df)__v1, __scale);
}

__funline void _mm256_mask_i32scatter_pd(void *__addr, __mmask8 __mask,
                                         __m128i __index, __m256d __v1,
                                         const int __scale) {
  __builtin_ia32_scattersiv4df(__addr, __mask, (__v4si)__index, (__v4df)__v1,
                               __scale);
}

__funline void _mm_i32scatter_pd(void *__addr, __m128i __index, __m128d __v1,
                                 const int __scale) {
  __builtin_ia32_scattersiv2df(__addr, (__mmask8)0xFF, (__v4si)__index,
                               (__v2df)__v1, __scale);
}

__funline void _mm_mask_i32scatter_pd(void *__addr, __mmask8 __mask,
                                      __m128i __index, __m128d __v1,
                                      const int __scale) {
  __builtin_ia32_scattersiv2df(__addr, __mask, (__v4si)__index, (__v2df)__v1,
                               __scale);
}

__funline void _mm256_i64scatter_ps(void *__addr, __m256i __index, __m128 __v1,
                                    const int __scale) {
  __builtin_ia32_scatterdiv8sf(__addr, (__mmask8)0xFF, (__v4di)__index,
                               (__v4sf)__v1, __scale);
}

__funline void _mm256_mask_i64scatter_ps(void *__addr, __mmask8 __mask,
                                         __m256i __index, __m128 __v1,
                                         const int __scale) {
  __builtin_ia32_scatterdiv8sf(__addr, __mask, (__v4di)__index, (__v4sf)__v1,
                               __scale);
}

__funline void _mm_i64scatter_ps(void *__addr, __m128i __index, __m128 __v1,
                                 const int __scale) {
  __builtin_ia32_scatterdiv4sf(__addr, (__mmask8)0xFF, (__v2di)__index,
                               (__v4sf)__v1, __scale);
}

__funline void _mm_mask_i64scatter_ps(void *__addr, __mmask8 __mask,
                                      __m128i __index, __m128 __v1,
                                      const int __scale) {
  __builtin_ia32_scatterdiv4sf(__addr, __mask, (__v2di)__index, (__v4sf)__v1,
                               __scale);
}

__funline void _mm256_i64scatter_pd(void *__addr, __m256i __index, __m256d __v1,
                                    const int __scale) {
  __builtin_ia32_scatterdiv4df(__addr, (__mmask8)0xFF, (__v4di)__index,
                               (__v4df)__v1, __scale);
}

__funline void _mm256_mask_i64scatter_pd(void *__addr, __mmask8 __mask,
                                         __m256i __index, __m256d __v1,
                                         const int __scale) {
  __builtin_ia32_scatterdiv4df(__addr, __mask, (__v4di)__index, (__v4df)__v1,
                               __scale);
}

__funline void _mm_i64scatter_pd(void *__addr, __m128i __index, __m128d __v1,
                                 const int __scale) {
  __builtin_ia32_scatterdiv2df(__addr, (__mmask8)0xFF, (__v2di)__index,
                               (__v2df)__v1, __scale);
}

__funline void _mm_mask_i64scatter_pd(void *__addr, __mmask8 __mask,
                                      __m128i __index, __m128d __v1,
                                      const int __scale) {
  __builtin_ia32_scatterdiv2df(__addr, __mask, (__v2di)__index, (__v2df)__v1,
                               __scale);
}

__funline void _mm256_i32scatter_epi32(void *__addr, __m256i __index,
                                       __m256i __v1, const int __scale) {
  __builtin_ia32_scattersiv8si(__addr, (__mmask8)0xFF, (__v8si)__index,
                               (__v8si)__v1, __scale);
}

__funline void _mm256_mask_i32scatter_epi32(void *__addr, __mmask8 __mask,
                                            __m256i __index, __m256i __v1,
                                            const int __scale) {
  __builtin_ia32_scattersiv8si(__addr, __mask, (__v8si)__index, (__v8si)__v1,
                               __scale);
}

__funline void _mm_i32scatter_epi32(void *__addr, __m128i __index, __m128i __v1,
                                    const int __scale) {
  __builtin_ia32_scattersiv4si(__addr, (__mmask8)0xFF, (__v4si)__index,
                               (__v4si)__v1, __scale);
}

__funline void _mm_mask_i32scatter_epi32(void *__addr, __mmask8 __mask,
                                         __m128i __index, __m128i __v1,
                                         const int __scale) {
  __builtin_ia32_scattersiv4si(__addr, __mask, (__v4si)__index, (__v4si)__v1,
                               __scale);
}

__funline void _mm256_i32scatter_epi64(void *__addr, __m128i __index,
                                       __m256i __v1, const int __scale) {
  __builtin_ia32_scattersiv4di(__addr, (__mmask8)0xFF, (__v4si)__index,
                               (__v4di)__v1, __scale);
}

__funline void _mm256_mask_i32scatter_epi64(void *__addr, __mmask8 __mask,
                                            __m128i __index, __m256i __v1,
                                            const int __scale) {
  __builtin_ia32_scattersiv4di(__addr, __mask, (__v4si)__index, (__v4di)__v1,
                               __scale);
}

__funline void _mm_i32scatter_epi64(void *__addr, __m128i __index, __m128i __v1,
                                    const int __scale) {
  __builtin_ia32_scattersiv2di(__addr, (__mmask8)0xFF, (__v4si)__index,
                               (__v2di)__v1, __scale);
}

__funline void _mm_mask_i32scatter_epi64(void *__addr, __mmask8 __mask,
                                         __m128i __index, __m128i __v1,
                                         const int __scale) {
  __builtin_ia32_scattersiv2di(__addr, __mask, (__v4si)__index, (__v2di)__v1,
                               __scale);
}

__funline void _mm256_i64scatter_epi32(void *__addr, __m256i __index,
                                       __m128i __v1, const int __scale) {
  __builtin_ia32_scatterdiv8si(__addr, (__mmask8)0xFF, (__v4di)__index,
                               (__v4si)__v1, __scale);
}

__funline void _mm256_mask_i64scatter_epi32(void *__addr, __mmask8 __mask,
                                            __m256i __index, __m128i __v1,
                                            const int __scale) {
  __builtin_ia32_scatterdiv8si(__addr, __mask, (__v4di)__index, (__v4si)__v1,
                               __scale);
}

__funline void _mm_i64scatter_epi32(void *__addr, __m128i __index, __m128i __v1,
                                    const int __scale) {
  __builtin_ia32_scatterdiv4si(__addr, (__mmask8)0xFF, (__v2di)__index,
                               (__v4si)__v1, __scale);
}

__funline void _mm_mask_i64scatter_epi32(void *__addr, __mmask8 __mask,
                                         __m128i __index, __m128i __v1,
                                         const int __scale) {
  __builtin_ia32_scatterdiv4si(__addr, __mask, (__v2di)__index, (__v4si)__v1,
                               __scale);
}

__funline void _mm256_i64scatter_epi64(void *__addr, __m256i __index,
                                       __m256i __v1, const int __scale) {
  __builtin_ia32_scatterdiv4di(__addr, (__mmask8)0xFF, (__v4di)__index,
                               (__v4di)__v1, __scale);
}
__funline void _mm256_mask_i64scatter_epi64(void *__addr, __mmask8 __mask,
                                            __m256i __index, __m256i __v1,
                                            const int __scale) {
  __builtin_ia32_scatterdiv4di(__addr, __mask, (__v4di)__index, (__v4di)__v1,
                               __scale);
}

__funline void _mm_i64scatter_epi64(void *__addr, __m128i __index, __m128i __v1,
                                    const int __scale) {
  __builtin_ia32_scatterdiv2di(__addr, (__mmask8)0xFF, (__v2di)__index,
                               (__v2di)__v1, __scale);
}

__funline void _mm_mask_i64scatter_epi64(void *__addr, __mmask8 __mask,
                                         __m128i __index, __m128i __v1,
                                         const int __scale) {
  __builtin_ia32_scatterdiv2di(__addr, __mask, (__v2di)__index, (__v2di)__v1,
                               __scale);
}

__funline __m256i _mm256_mask_shuffle_epi32(__m256i __W, __mmask8 __U,
                                            __m256i __A, _MM_PERM_ENUM __mask) {
  return (__m256i)__builtin_ia32_pshufd256_mask((__v8si)__A, __mask,
                                                (__v8si)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_shuffle_epi32(__mmask8 __U, __m256i __A,
                                             _MM_PERM_ENUM __mask) {
  return (__m256i)__builtin_ia32_pshufd256_mask(
      (__v8si)__A, __mask, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m128i _mm_mask_shuffle_epi32(__m128i __W, __mmask8 __U, __m128i __A,
                                         _MM_PERM_ENUM __mask) {
  return (__m128i)__builtin_ia32_pshufd128_mask((__v4si)__A, __mask,
                                                (__v4si)__W, (__mmask8)__U);
}

__funline __m128i _mm_maskz_shuffle_epi32(__mmask8 __U, __m128i __A,
                                          _MM_PERM_ENUM __mask) {
  return (__m128i)__builtin_ia32_pshufd128_mask(
      (__v4si)__A, __mask, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
}
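/* For the rol/ror intrinsics below the rotate count is taken modulo the
   element width (32 or 64 bits).  */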
__funline __m256i _mm256_rol_epi32(__m256i __A, const int __B) {
  return (__m256i)__builtin_ia32_prold256_mask(
      (__v8si)__A, __B, (__v8si)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_rol_epi32(__m256i __W, __mmask8 __U, __m256i __A,
                                        const int __B) {
  return (__m256i)__builtin_ia32_prold256_mask((__v8si)__A, __B, (__v8si)__W,
                                               (__mmask8)__U);
}

__funline __m256i _mm256_maskz_rol_epi32(__mmask8 __U, __m256i __A,
                                         const int __B) {
  return (__m256i)__builtin_ia32_prold256_mask(
      (__v8si)__A, __B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m128i _mm_rol_epi32(__m128i __A, const int __B) {
  return (__m128i)__builtin_ia32_prold128_mask(
      (__v4si)__A, __B, (__v4si)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm_mask_rol_epi32(__m128i __W, __mmask8 __U, __m128i __A,
                                     const int __B) {
  return (__m128i)__builtin_ia32_prold128_mask((__v4si)__A, __B, (__v4si)__W,
                                               (__mmask8)__U);
}

__funline __m128i _mm_maskz_rol_epi32(__mmask8 __U, __m128i __A, const int __B) {
  return (__m128i)__builtin_ia32_prold128_mask(
      (__v4si)__A, __B, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m256i _mm256_ror_epi32(__m256i __A, const int __B) {
  return (__m256i)__builtin_ia32_prord256_mask(
      (__v8si)__A, __B, (__v8si)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_ror_epi32(__m256i __W, __mmask8 __U, __m256i __A,
                                        const int __B) {
  return (__m256i)__builtin_ia32_prord256_mask((__v8si)__A, __B, (__v8si)__W,
                                               (__mmask8)__U);
}

__funline __m256i _mm256_maskz_ror_epi32(__mmask8 __U, __m256i __A,
                                         const int __B) {
  return (__m256i)__builtin_ia32_prord256_mask(
      (__v8si)__A, __B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m128i _mm_ror_epi32(__m128i __A, const int __B) {
  return (__m128i)__builtin_ia32_prord128_mask(
      (__v4si)__A, __B, (__v4si)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm_mask_ror_epi32(__m128i __W, __mmask8 __U, __m128i __A,
                                     const int __B) {
  return (__m128i)__builtin_ia32_prord128_mask((__v4si)__A, __B, (__v4si)__W,
                                               (__mmask8)__U);
}

__funline __m128i _mm_maskz_ror_epi32(__mmask8 __U, __m128i __A, const int __B) {
  return (__m128i)__builtin_ia32_prord128_mask(
      (__v4si)__A, __B, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m256i _mm256_rol_epi64(__m256i __A, const int __B) {
  return (__m256i)__builtin_ia32_prolq256_mask(
      (__v4di)__A, __B, (__v4di)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_rol_epi64(__m256i __W, __mmask8 __U, __m256i __A,
                                        const int __B) {
  return (__m256i)__builtin_ia32_prolq256_mask((__v4di)__A, __B, (__v4di)__W,
                                               (__mmask8)__U);
}

__funline __m256i _mm256_maskz_rol_epi64(__mmask8 __U, __m256i __A,
                                         const int __B) {
  return (__m256i)__builtin_ia32_prolq256_mask(
      (__v4di)__A, __B, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m128i _mm_rol_epi64(__m128i __A, const int __B) {
  return (__m128i)__builtin_ia32_prolq128_mask(
      (__v2di)__A, __B, (__v2di)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm_mask_rol_epi64(__m128i __W, __mmask8 __U, __m128i __A,
                                     const int __B) {
  return (__m128i)__builtin_ia32_prolq128_mask((__v2di)__A, __B, (__v2di)__W,
                                               (__mmask8)__U);
}

__funline __m128i _mm_maskz_rol_epi64(__mmask8 __U, __m128i __A, const int __B) {
  return (__m128i)__builtin_ia32_prolq128_mask(
      (__v2di)__A, __B, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m256i _mm256_ror_epi64(__m256i __A, const int __B) {
  return (__m256i)__builtin_ia32_prorq256_mask(
      (__v4di)__A, __B, (__v4di)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_ror_epi64(__m256i __W, __mmask8 __U, __m256i __A,
                                        const int __B) {
  return (__m256i)__builtin_ia32_prorq256_mask((__v4di)__A, __B, (__v4di)__W,
                                               (__mmask8)__U);
}

__funline __m256i _mm256_maskz_ror_epi64(__mmask8 __U, __m256i __A,
                                         const int __B) {
  return (__m256i)__builtin_ia32_prorq256_mask(
      (__v4di)__A, __B, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m128i _mm_ror_epi64(__m128i __A, const int __B) {
  return (__m128i)__builtin_ia32_prorq128_mask(
      (__v2di)__A, __B, (__v2di)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm_mask_ror_epi64(__m128i __W, __mmask8 __U, __m128i __A,
                                     const int __B) {
  return (__m128i)__builtin_ia32_prorq128_mask((__v2di)__A, __B, (__v2di)__W,
                                               (__mmask8)__U);
}

__funline __m128i _mm_maskz_ror_epi64(__mmask8 __U, __m128i __A, const int __B) {
  return (__m128i)__builtin_ia32_prorq128_mask(
      (__v2di)__A, __B, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m128i _mm_alignr_epi32(__m128i __A, __m128i __B, const int __imm) {
  return (__m128i)__builtin_ia32_alignd128_mask((__v4si)__A, (__v4si)__B, __imm,
                                                (__v4si)_mm_setzero_si128(),
                                                (__mmask8)-1);
}

__funline __m128i _mm_mask_alignr_epi32(__m128i __W, __mmask8 __U, __m128i __A,
                                        __m128i __B, const int __imm) {
  return (__m128i)__builtin_ia32_alignd128_mask((__v4si)__A, (__v4si)__B, __imm,
                                                (__v4si)__W, (__mmask8)__U);
}

__funline __m128i _mm_maskz_alignr_epi32(__mmask8 __U, __m128i __A, __m128i __B,
                                         const int __imm) {
  return (__m128i)__builtin_ia32_alignd128_mask((__v4si)__A, (__v4si)__B, __imm,
                                                (__v4si)_mm_setzero_si128(),
                                                (__mmask8)__U);
}

__funline __m128i _mm_alignr_epi64(__m128i __A, __m128i __B, const int __imm) {
  return (__m128i)__builtin_ia32_alignq128_mask((__v2di)__A, (__v2di)__B, __imm,
                                                (__v2di)_mm_setzero_si128(),
                                                (__mmask8)-1);
}

__funline __m128i _mm_mask_alignr_epi64(__m128i __W, __mmask8 __U, __m128i __A,
                                        __m128i __B, const int __imm) {
  return (__m128i)__builtin_ia32_alignq128_mask((__v2di)__A, (__v2di)__B, __imm,
                                                (__v2di)__W, (__mmask8)__U);
}

__funline __m128i _mm_maskz_alignr_epi64(__mmask8 __U, __m128i __A, __m128i __B,
                                         const int __imm) {
  return (__m128i)__builtin_ia32_alignq128_mask((__v2di)__A, (__v2di)__B, __imm,
                                                (__v2di)_mm_setzero_si128(),
                                                (__mmask8)__U);
}
__funline __m256i _mm256_alignr_epi32(__m256i __A, __m256i __B,
                                      const int __imm) {
  return (__m256i)__builtin_ia32_alignd256_mask((__v8si)__A, (__v8si)__B, __imm,
                                                (__v8si)_mm256_setzero_si256(),
                                                (__mmask8)-1);
}

__funline __m256i _mm256_mask_alignr_epi32(__m256i __W, __mmask8 __U,
                                           __m256i __A, __m256i __B,
                                           const int __imm) {
  return (__m256i)__builtin_ia32_alignd256_mask((__v8si)__A, (__v8si)__B, __imm,
                                                (__v8si)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_alignr_epi32(__mmask8 __U, __m256i __A,
                                            __m256i __B, const int __imm) {
  return (__m256i)__builtin_ia32_alignd256_mask((__v8si)__A, (__v8si)__B, __imm,
                                                (__v8si)_mm256_setzero_si256(),
                                                (__mmask8)__U);
}

__funline __m256i _mm256_alignr_epi64(__m256i __A, __m256i __B,
                                      const int __imm) {
  return (__m256i)__builtin_ia32_alignq256_mask((__v4di)__A, (__v4di)__B, __imm,
                                                (__v4di)_mm256_setzero_si256(),
                                                (__mmask8)-1);
}

__funline __m256i _mm256_mask_alignr_epi64(__m256i __W, __mmask8 __U,
                                           __m256i __A, __m256i __B,
                                           const int __imm) {
  return (__m256i)__builtin_ia32_alignq256_mask((__v4di)__A, (__v4di)__B, __imm,
                                                (__v4di)__W, (__mmask8)__U);
}

__funline __m256i _mm256_maskz_alignr_epi64(__mmask8 __U, __m256i __A,
                                            __m256i __B, const int __imm) {
  return (__m256i)__builtin_ia32_alignq256_mask((__v4di)__A, (__v4di)__B, __imm,
                                                (__v4di)_mm256_setzero_si256(),
                                                (__mmask8)__U);
}

__funline __m128i _mm_mask_cvtps_ph(__m128i __W, __mmask8 __U, __m128 __A,
                                    const int __I) {
  return (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)__A, __I, (__v8hi)__W,
                                                (__mmask8)__U);
}

__funline __m128i _mm_maskz_cvtps_ph(__mmask8 __U, __m128 __A, const int __I) {
  return (__m128i)__builtin_ia32_vcvtps2ph_mask(
      (__v4sf)__A, __I, (__v8hi)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m128i _mm256_mask_cvtps_ph(__m128i __W, __mmask8 __U, __m256 __A,
                                       const int __I) {
  return (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)__A, __I,
                                                   (__v8hi)__W, (__mmask8)__U);
}

__funline __m128i _mm256_maskz_cvtps_ph(__mmask8 __U, __m256 __A,
                                        const int __I) {
  return (__m128i)__builtin_ia32_vcvtps2ph256_mask(
      (__v8sf)__A, __I, (__v8hi)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m256i _mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A,
                                         const int __imm) {
  return (__m256i)__builtin_ia32_psradi256_mask((__v8si)__A, __imm, (__v8si)__W,
                                                (__mmask8)__U);
}

__funline __m256i _mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A,
                                          const int __imm) {
  return (__m256i)__builtin_ia32_psradi256_mask(
      (__v8si)__A, __imm, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m128i _mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A,
                                      const int __imm) {
  return (__m128i)__builtin_ia32_psradi128_mask((__v4si)__A, __imm, (__v4si)__W,
                                                (__mmask8)__U);
}

__funline __m128i _mm_maskz_srai_epi32(__mmask8 __U, __m128i __A,
                                       const int __imm) {
  return (__m128i)__builtin_ia32_psradi128_mask(
      (__v4si)__A, __imm, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m256i _mm256_srai_epi64(__m256i __A, const int __imm) {
  return (__m256i)__builtin_ia32_psraqi256_mask(
      (__v4di)__A, __imm, (__v4di)_mm256_setzero_si256(), (__mmask8)-1);
}

__funline __m256i _mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A,
                                         const int __imm) {
  return (__m256i)__builtin_ia32_psraqi256_mask((__v4di)__A, __imm, (__v4di)__W,
                                                (__mmask8)__U);
}

__funline __m256i _mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A,
                                          const int __imm) {
  return (__m256i)__builtin_ia32_psraqi256_mask(
      (__v4di)__A, __imm, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}
__funline __m128i _mm_srai_epi64(__m128i __A, const int __imm) {
  return (__m128i)__builtin_ia32_psraqi128_mask(
      (__v2di)__A, __imm, (__v2di)_mm_setzero_si128(), (__mmask8)-1);
}

__funline __m128i _mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A,
                                      const int __imm) {
  return (__m128i)__builtin_ia32_psraqi128_mask((__v2di)__A, __imm, (__v2di)__W,
                                                (__mmask8)__U);
}

__funline __m128i _mm_maskz_srai_epi64(__mmask8 __U, __m128i __A,
                                       const int __imm) {
  return (__m128i)__builtin_ia32_psraqi128_mask(
      (__v2di)__A, __imm, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m128i _mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A,
                                      int __B) {
  return (__m128i)__builtin_ia32_pslldi128_mask((__v4si)__A, __B, (__v4si)__W,
                                                (__mmask8)__U);
}

__funline __m128i _mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, int __B) {
  return (__m128i)__builtin_ia32_pslldi128_mask(
      (__v4si)__A, __B, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m128i _mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A,
                                      int __B) {
  return (__m128i)__builtin_ia32_psllqi128_mask((__v2di)__A, __B, (__v2di)__W,
                                                (__mmask8)__U);
}

__funline __m128i _mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, int __B) {
  return (__m128i)__builtin_ia32_psllqi128_mask(
      (__v2di)__A, __B, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
}

__funline __m256i _mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A,
                                         int __B) {
  return (__m256i)__builtin_ia32_pslldi256_mask((__v8si)__A, __B, (__v8si)__W,
                                                (__mmask8)__U);
}

__funline __m256i _mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, int __B) {
  return (__m256i)__builtin_ia32_pslldi256_mask(
      (__v8si)__A, __B, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m256i _mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A,
                                         int __B) {
  return (__m256i)__builtin_ia32_psllqi256_mask((__v4di)__A, __B, (__v4di)__W,
                                                (__mmask8)__U);
}

__funline __m256i _mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, int __B) {
  return (__m256i)__builtin_ia32_psllqi256_mask(
      (__v4di)__A, __B, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}

__funline __m256d _mm256_mask_permutex_pd(__m256d __W, __mmask8 __U, __m256d __X,
                                          const int __imm) {
  return (__m256d)__builtin_ia32_permdf256_mask((__v4df)__X, __imm, (__v4df)__W,
                                                (__mmask8)__U);
}

__funline __m256d _mm256_maskz_permutex_pd(__mmask8 __U, __m256d __X,
                                           const int __imm) {
  return (__m256d)__builtin_ia32_permdf256_mask(
      (__v4df)__X, __imm, (__v4df)_mm256_setzero_pd(), (__mmask8)__U);
}

__funline __m256d _mm256_mask_permute_pd(__m256d __W, __mmask8 __U, __m256d __X,
                                         const int __C) {
  return (__m256d)__builtin_ia32_vpermilpd256_mask((__v4df)__X, __C,
                                                   (__v4df)__W, (__mmask8)__U);
}

__funline __m256d _mm256_maskz_permute_pd(__mmask8 __U, __m256d __X,
                                          const int __C) {
  return (__m256d)__builtin_ia32_vpermilpd256_mask(
      (__v4df)__X, __C, (__v4df)_mm256_setzero_pd(), (__mmask8)__U);
}

__funline __m128d _mm_mask_permute_pd(__m128d __W, __mmask8 __U, __m128d __X,
                                      const int __C) {
  return (__m128d)__builtin_ia32_vpermilpd_mask((__v2df)__X, __C, (__v2df)__W,
                                                (__mmask8)__U);
}

__funline __m128d _mm_maskz_permute_pd(__mmask8 __U, __m128d __X,
                                       const int __C) {
  return (__m128d)__builtin_ia32_vpermilpd_mask(
      (__v2df)__X, __C, (__v2df)_mm_setzero_pd(), (__mmask8)__U);
}

__funline __m256 _mm256_mask_permute_ps(__m256 __W, __mmask8 __U, __m256 __X,
                                        const int __C) {
  return (__m256)__builtin_ia32_vpermilps256_mask((__v8sf)__X, __C, (__v8sf)__W,
                                                  (__mmask8)__U);
}
__funline __m256 _mm256_maskz_permute_ps(__mmask8 __U, __m256 __X,
                                         const int __C) {
  return (__m256)__builtin_ia32_vpermilps256_mask(
      (__v8sf)__X, __C, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U);
}

__funline __m128 _mm_mask_permute_ps(__m128 __W, __mmask8 __U, __m128 __X,
                                     const int __C) {
  return (__m128)__builtin_ia32_vpermilps_mask((__v4sf)__X, __C, (__v4sf)__W,
                                               (__mmask8)__U);
}

__funline __m128 _mm_maskz_permute_ps(__mmask8 __U, __m128 __X, const int __C) {
  return (__m128)__builtin_ia32_vpermilps_mask(
      (__v4sf)__X, __C, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
}

__funline __m256d _mm256_mask_blend_pd(__mmask8 __U, __m256d __A, __m256d __W) {
  return (__m256d)__builtin_ia32_blendmpd_256_mask((__v4df)__A, (__v4df)__W,
                                                   (__mmask8)__U);
}

__funline __m256 _mm256_mask_blend_ps(__mmask8 __U, __m256 __A, __m256 __W) {
  return (__m256)__builtin_ia32_blendmps_256_mask((__v8sf)__A, (__v8sf)__W,
                                                  (__mmask8)__U);
}

__funline __m256i _mm256_mask_blend_epi64(__mmask8 __U, __m256i __A,
                                          __m256i __W) {
  return (__m256i)__builtin_ia32_blendmq_256_mask((__v4di)__A, (__v4di)__W,
                                                  (__mmask8)__U);
}

__funline __m256i _mm256_mask_blend_epi32(__mmask8 __U, __m256i __A,
                                          __m256i __W) {
  return (__m256i)__builtin_ia32_blendmd_256_mask((__v8si)__A, (__v8si)__W,
                                                  (__mmask8)__U);
}

__funline __m128d _mm_mask_blend_pd(__mmask8 __U, __m128d __A, __m128d __W) {
  return (__m128d)__builtin_ia32_blendmpd_128_mask((__v2df)__A, (__v2df)__W,
                                                   (__mmask8)__U);
}

__funline __m128 _mm_mask_blend_ps(__mmask8 __U, __m128 __A, __m128 __W) {
  return (__m128)__builtin_ia32_blendmps_128_mask((__v4sf)__A, (__v4sf)__W,
                                                  (__mmask8)__U);
}

__funline __m128i _mm_mask_blend_epi64(__mmask8 __U, __m128i __A, __m128i __W) {
  return (__m128i)__builtin_ia32_blendmq_128_mask((__v2di)__A, (__v2di)__W,
                                                  (__mmask8)__U);
}

__funline __m128i _mm_mask_blend_epi32(__mmask8 __U, __m128i __A, __m128i __W) {
  return (__m128i)__builtin_ia32_blendmd_128_mask((__v4si)__A, (__v4si)__W,
                                                  (__mmask8)__U);
}
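/* The __P predicate of the integer comparisons below is one of the
   _MM_CMPINT_* constants; the pd/ps forms take a _CMP_* predicate.  */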
__funline __mmask8 _mm256_cmp_epi64_mask(__m256i __X, __m256i __Y,
                                         const int __P) {
  return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__X, (__v4di)__Y, __P,
                                               (__mmask8)-1);
}

__funline __mmask8 _mm256_cmp_epi32_mask(__m256i __X, __m256i __Y,
                                         const int __P) {
  return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__X, (__v8si)__Y, __P,
                                               (__mmask8)-1);
}

__funline __mmask8 _mm256_cmp_epu64_mask(__m256i __X, __m256i __Y,
                                         const int __P) {
  return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__X, (__v4di)__Y, __P,
                                                (__mmask8)-1);
}

__funline __mmask8 _mm256_cmp_epu32_mask(__m256i __X, __m256i __Y,
                                         const int __P) {
  return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__X, (__v8si)__Y, __P,
                                                (__mmask8)-1);
}

__funline __mmask8 _mm256_cmp_pd_mask(__m256d __X, __m256d __Y, const int __P) {
  return (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)__X, (__v4df)__Y, __P,
                                                (__mmask8)-1);
}

__funline __mmask8 _mm256_cmp_ps_mask(__m256 __X, __m256 __Y, const int __P) {
  return (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)__X, (__v8sf)__Y, __P,
                                                (__mmask8)-1);
}

__funline __mmask8 _mm256_mask_cmp_epi64_mask(__mmask8 __U, __m256i __X,
                                              __m256i __Y, const int __P) {
  return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__X, (__v4di)__Y, __P,
                                               (__mmask8)__U);
}

__funline __mmask8 _mm256_mask_cmp_epi32_mask(__mmask8 __U, __m256i __X,
                                              __m256i __Y, const int __P) {
  return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__X, (__v8si)__Y, __P,
                                               (__mmask8)__U);
}

__funline __mmask8 _mm256_mask_cmp_epu64_mask(__mmask8 __U, __m256i __X,
                                              __m256i __Y, const int __P) {
  return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__X, (__v4di)__Y, __P,
                                                (__mmask8)__U);
}

__funline __mmask8 _mm256_mask_cmp_epu32_mask(__mmask8 __U, __m256i __X,
                                              __m256i __Y, const int __P) {
  return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__X, (__v8si)__Y, __P,
                                                (__mmask8)__U);
}

__funline __mmask8 _mm256_mask_cmp_pd_mask(__mmask8 __U, __m256d __X,
                                           __m256d __Y, const int __P) {
  return (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)__X, (__v4df)__Y, __P,
                                                (__mmask8)__U);
}

__funline __mmask8 _mm256_mask_cmp_ps_mask(__mmask8 __U, __m256 __X, __m256 __Y,
                                           const int __P) {
  return (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)__X, (__v8sf)__Y, __P,
                                                (__mmask8)__U);
}

__funline __mmask8 _mm_cmp_epi64_mask(__m128i __X, __m128i __Y, const int __P) {
  return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__X, (__v2di)__Y, __P,
                                               (__mmask8)-1);
}

__funline __mmask8 _mm_cmp_epi32_mask(__m128i __X, __m128i __Y, const int __P) {
  return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__X, (__v4si)__Y, __P,
                                               (__mmask8)-1);
}

__funline __mmask8 _mm_cmp_epu64_mask(__m128i __X, __m128i __Y, const int __P) {
  return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__X, (__v2di)__Y, __P,
                                                (__mmask8)-1);
}

__funline __mmask8 _mm_cmp_epu32_mask(__m128i __X, __m128i __Y, const int __P) {
  return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__X, (__v4si)__Y, __P,
                                                (__mmask8)-1);
}

__funline __mmask8 _mm_cmp_pd_mask(__m128d __X, __m128d __Y, const int __P) {
  return (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)__X, (__v2df)__Y, __P,
                                                (__mmask8)-1);
}

__funline __mmask8 _mm_cmp_ps_mask(__m128 __X, __m128 __Y, const int __P) {
  return (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)__X, (__v4sf)__Y, __P,
                                                (__mmask8)-1);
}

__funline __mmask8 _mm_mask_cmp_epi64_mask(__mmask8 __U, __m128i __X,
                                           __m128i __Y, const int __P) {
  return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__X, (__v2di)__Y, __P,
                                               (__mmask8)__U);
}

__funline __mmask8 _mm_mask_cmp_epi32_mask(__mmask8 __U, __m128i __X,
                                           __m128i __Y, const int __P) {
  return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__X, (__v4si)__Y, __P,
                                               (__mmask8)__U);
}

__funline __mmask8 _mm_mask_cmp_epu64_mask(__mmask8 __U, __m128i __X,
                                           __m128i __Y, const int __P) {
  return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__X, (__v2di)__Y, __P,
                                                (__mmask8)__U);
}

__funline __mmask8 _mm_mask_cmp_epu32_mask(__mmask8 __U, __m128i __X,
                                           __m128i __Y, const int __P) {
  return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__X, (__v4si)__Y, __P,
                                                (__mmask8)__U);
}

__funline __mmask8 _mm_mask_cmp_pd_mask(__mmask8 __U, __m128d __X, __m128d __Y,
                                        const int __P) {
  return (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)__X, (__v2df)__Y, __P,
                                                (__mmask8)__U);
}

__funline __mmask8 _mm_mask_cmp_ps_mask(__mmask8 __U, __m128 __X, __m128 __Y,
                                        const int __P) {
  return (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)__X, (__v4sf)__Y, __P,
                                                (__mmask8)__U);
}

__funline __m256d _mm256_permutex_pd(__m256d __X, const int __M) {
  return (__m256d)__builtin_ia32_permdf256_mask(
      (__v4df)__X, __M, (__v4df)_mm256_undefined_pd(), (__mmask8)-1);
}

#else
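/* Without __OPTIMIZE__ the immediate operands can't be constant-folded
   through the inline wrappers above, so the same intrinsics are provided
   as macros instead.  */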
#define _mm256_permutex_pd(X, M) \
  ((__m256d)__builtin_ia32_permdf256_mask( \
      (__v4df)(__m256d)(X), (int)(M), (__v4df)(__m256d)_mm256_undefined_pd(), \
      (__mmask8)-1))
#define _mm256_permutex_epi64(X, I) \
  ((__m256i)__builtin_ia32_permdi256_mask( \
      (__v4di)(__m256i)(X), (int)(I), \
      (__v4di)(__m256i)(_mm256_setzero_si256()), (__mmask8)-1))
#define _mm256_maskz_permutex_epi64(M, X, I) \
  ((__m256i)__builtin_ia32_permdi256_mask( \
      (__v4di)(__m256i)(X), (int)(I), \
      (__v4di)(__m256i)(_mm256_setzero_si256()), (__mmask8)(M)))
#define _mm256_mask_permutex_epi64(W, M, X, I) \
  ((__m256i)__builtin_ia32_permdi256_mask( \
      (__v4di)(__m256i)(X), (int)(I), (__v4di)(__m256i)(W), (__mmask8)(M)))
#define _mm256_insertf32x4(X, Y, C) \
  ((__m256)__builtin_ia32_insertf32x4_256_mask( \
      (__v8sf)(__m256)(X), (__v4sf)(__m128)(Y), (int)(C), \
      (__v8sf)(__m256)_mm256_setzero_ps(), (__mmask8)-1))
#define _mm256_mask_insertf32x4(W, U, X, Y, C) \
  ((__m256)__builtin_ia32_insertf32x4_256_mask( \
      (__v8sf)(__m256)(X), (__v4sf)(__m128)(Y), (int)(C), (__v8sf)(__m256)(W), \
      (__mmask8)(U)))
#define _mm256_maskz_insertf32x4(U, X, Y, C) \
  ((__m256)__builtin_ia32_insertf32x4_256_mask( \
      (__v8sf)(__m256)(X), (__v4sf)(__m128)(Y), (int)(C), \
      (__v8sf)(__m256)_mm256_setzero_ps(), (__mmask8)(U)))
#define _mm256_inserti32x4(X, Y, C) \
  ((__m256i)__builtin_ia32_inserti32x4_256_mask( \
      (__v8si)(__m256i)(X), (__v4si)(__m128i)(Y), (int)(C), \
      (__v8si)(__m256i)_mm256_setzero_si256(), (__mmask8)-1))
#define _mm256_mask_inserti32x4(W, U, X, Y, C) \
  ((__m256i)__builtin_ia32_inserti32x4_256_mask( \
      (__v8si)(__m256i)(X), (__v4si)(__m128i)(Y), (int)(C), \
      (__v8si)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_inserti32x4(U, X, Y, C) \
  ((__m256i)__builtin_ia32_inserti32x4_256_mask( \
      (__v8si)(__m256i)(X), (__v4si)(__m128i)(Y), (int)(C), \
      (__v8si)(__m256i)_mm256_setzero_si256(), (__mmask8)(U)))
#define _mm256_extractf32x4_ps(X, C) \
  ((__m128)__builtin_ia32_extractf32x4_256_mask( \
      (__v8sf)(__m256)(X), (int)(C), (__v4sf)(__m128)_mm_setzero_ps(), \
      (__mmask8)-1))
#define _mm256_mask_extractf32x4_ps(W, U, X, C) \
  ((__m128)__builtin_ia32_extractf32x4_256_mask( \
      (__v8sf)(__m256)(X), (int)(C), (__v4sf)(__m128)(W), (__mmask8)(U)))
#define _mm256_maskz_extractf32x4_ps(U, X, C) \
  ((__m128)__builtin_ia32_extractf32x4_256_mask( \
      (__v8sf)(__m256)(X), (int)(C), (__v4sf)(__m128)_mm_setzero_ps(), \
      (__mmask8)(U)))
#define _mm256_extracti32x4_epi32(X, C) \
  ((__m128i)__builtin_ia32_extracti32x4_256_mask( \
      (__v8si)(__m256i)(X), (int)(C), (__v4si)(__m128i)_mm_setzero_si128(), \
      (__mmask8)-1))
#define _mm256_mask_extracti32x4_epi32(W, U, X, C) \
  ((__m128i)__builtin_ia32_extracti32x4_256_mask( \
      (__v8si)(__m256i)(X), (int)(C), (__v4si)(__m128i)(W), (__mmask8)(U)))
#define _mm256_maskz_extracti32x4_epi32(U, X, C) \
  ((__m128i)__builtin_ia32_extracti32x4_256_mask( \
      (__v8si)(__m256i)(X), (int)(C), (__v4si)(__m128i)_mm_setzero_si128(), \
      (__mmask8)(U)))
#define _mm256_shuffle_i64x2(X, Y, C) \
  ((__m256i)__builtin_ia32_shuf_i64x2_256_mask( \
      (__v4di)(__m256i)(X), (__v4di)(__m256i)(Y), (int)(C), \
      (__v4di)(__m256i)_mm256_setzero_si256(), (__mmask8)-1))
#define _mm256_mask_shuffle_i64x2(W, U, X, Y, C) \
  ((__m256i)__builtin_ia32_shuf_i64x2_256_mask( \
      (__v4di)(__m256i)(X), (__v4di)(__m256i)(Y), (int)(C), \
      (__v4di)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_shuffle_i64x2(U, X, Y, C) \
  ((__m256i)__builtin_ia32_shuf_i64x2_256_mask( \
      (__v4di)(__m256i)(X), (__v4di)(__m256i)(Y), (int)(C), \
      (__v4di)(__m256i)_mm256_setzero_si256(), (__mmask8)(U)))
#define _mm256_shuffle_i32x4(X, Y, C) \
  ((__m256i)__builtin_ia32_shuf_i32x4_256_mask( \
      (__v8si)(__m256i)(X), (__v8si)(__m256i)(Y), (int)(C), \
      (__v8si)(__m256i)_mm256_setzero_si256(), (__mmask8)-1))
#define _mm256_mask_shuffle_i32x4(W, U, X, Y, C) \
  ((__m256i)__builtin_ia32_shuf_i32x4_256_mask( \
      (__v8si)(__m256i)(X), (__v8si)(__m256i)(Y), (int)(C), \
      (__v8si)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_shuffle_i32x4(U, X, Y, C) \
  ((__m256i)__builtin_ia32_shuf_i32x4_256_mask( \
      (__v8si)(__m256i)(X), (__v8si)(__m256i)(Y), (int)(C), \
      (__v8si)(__m256i)_mm256_setzero_si256(), (__mmask8)(U)))
#define _mm256_shuffle_f64x2(X, Y, C) \
  ((__m256d)__builtin_ia32_shuf_f64x2_256_mask( \
      (__v4df)(__m256d)(X), (__v4df)(__m256d)(Y), (int)(C), \
      (__v4df)(__m256d)_mm256_setzero_pd(), (__mmask8)-1))
#define _mm256_mask_shuffle_f64x2(W, U, X, Y, C) \
  ((__m256d)__builtin_ia32_shuf_f64x2_256_mask( \
      (__v4df)(__m256d)(X), (__v4df)(__m256d)(Y), (int)(C), \
      (__v4df)(__m256d)(W), (__mmask8)(U)))
#define _mm256_maskz_shuffle_f64x2(U, X, Y, C) \
  ((__m256d)__builtin_ia32_shuf_f64x2_256_mask( \
      (__v4df)(__m256d)(X), (__v4df)(__m256d)(Y), (int)(C), \
      (__v4df)(__m256d)_mm256_setzero_pd(), (__mmask8)(U)))
#define _mm256_shuffle_f32x4(X, Y, C) \
  ((__m256)__builtin_ia32_shuf_f32x4_256_mask( \
      (__v8sf)(__m256)(X), (__v8sf)(__m256)(Y), (int)(C), \
      (__v8sf)(__m256)_mm256_setzero_ps(), (__mmask8)-1))
#define _mm256_mask_shuffle_f32x4(W, U, X, Y, C) \
  ((__m256)__builtin_ia32_shuf_f32x4_256_mask( \
      (__v8sf)(__m256)(X), (__v8sf)(__m256)(Y), (int)(C), (__v8sf)(__m256)(W), \
      (__mmask8)(U)))
#define _mm256_maskz_shuffle_f32x4(U, X, Y, C) \
  ((__m256)__builtin_ia32_shuf_f32x4_256_mask( \
      (__v8sf)(__m256)(X), (__v8sf)(__m256)(Y), (int)(C), \
      (__v8sf)(__m256)_mm256_setzero_ps(), (__mmask8)(U)))
#define _mm256_mask_shuffle_pd(W, U, A, B, C) \
  ((__m256d)__builtin_ia32_shufpd256_mask( \
      (__v4df)(__m256d)(A), (__v4df)(__m256d)(B), (int)(C), \
      (__v4df)(__m256d)(W), (__mmask8)(U)))
#define _mm256_maskz_shuffle_pd(U, A, B, C) \
  ((__m256d)__builtin_ia32_shufpd256_mask( \
      (__v4df)(__m256d)(A), (__v4df)(__m256d)(B), (int)(C), \
      (__v4df)(__m256d)_mm256_setzero_pd(), (__mmask8)(U)))
#define _mm_mask_shuffle_pd(W, U, A, B, C) \
  ((__m128d)__builtin_ia32_shufpd128_mask( \
      (__v2df)(__m128d)(A), (__v2df)(__m128d)(B), (int)(C), \
      (__v2df)(__m128d)(W), (__mmask8)(U)))
#define _mm_maskz_shuffle_pd(U, A, B, C) \
  ((__m128d)__builtin_ia32_shufpd128_mask( \
      (__v2df)(__m128d)(A), (__v2df)(__m128d)(B), (int)(C), \
      (__v2df)(__m128d)_mm_setzero_pd(), (__mmask8)(U)))
#define _mm256_mask_shuffle_ps(W, U, A, B, C) \
  ((__m256)__builtin_ia32_shufps256_mask((__v8sf)(__m256)(A), \
                                         (__v8sf)(__m256)(B), (int)(C), \
                                         (__v8sf)(__m256)(W), (__mmask8)(U)))
#define _mm256_maskz_shuffle_ps(U, A, B, C) \
  ((__m256)__builtin_ia32_shufps256_mask( \
      (__v8sf)(__m256)(A), (__v8sf)(__m256)(B), (int)(C), \
      (__v8sf)(__m256)_mm256_setzero_ps(), (__mmask8)(U)))
#define _mm_mask_shuffle_ps(W, U, A, B, C) \
  ((__m128)__builtin_ia32_shufps128_mask((__v4sf)(__m128)(A), \
                                         (__v4sf)(__m128)(B), (int)(C), \
                                         (__v4sf)(__m128)(W), (__mmask8)(U)))
#define _mm_maskz_shuffle_ps(U, A, B, C) \
  ((__m128)__builtin_ia32_shufps128_mask( \
      (__v4sf)(__m128)(A), (__v4sf)(__m128)(B), (int)(C), \
      (__v4sf)(__m128)_mm_setzero_ps(), (__mmask8)(U)))
#define _mm256_fixupimm_pd(X, Y, Z, C) \
  ((__m256d)__builtin_ia32_fixupimmpd256_mask( \
      (__v4df)(__m256d)(X), (__v4df)(__m256d)(Y), (__v4di)(__m256i)(Z), \
      (int)(C), (__mmask8)(-1)))
#define _mm256_mask_fixupimm_pd(X, U, Y, Z, C) \
  ((__m256d)__builtin_ia32_fixupimmpd256_mask( \
      (__v4df)(__m256d)(X), (__v4df)(__m256d)(Y), (__v4di)(__m256i)(Z), \
      (int)(C), (__mmask8)(U)))
#define _mm256_maskz_fixupimm_pd(U, X, Y, Z, C) \
  ((__m256d)__builtin_ia32_fixupimmpd256_maskz( \
      (__v4df)(__m256d)(X), (__v4df)(__m256d)(Y), (__v4di)(__m256i)(Z), \
      (int)(C), (__mmask8)(U)))
#define _mm256_fixupimm_ps(X, Y, Z, C) \
  ((__m256)__builtin_ia32_fixupimmps256_mask( \
      (__v8sf)(__m256)(X), (__v8sf)(__m256)(Y), (__v8si)(__m256i)(Z), \
      (int)(C), (__mmask8)(-1)))
#define _mm256_mask_fixupimm_ps(X, U, Y, Z, C) \
  ((__m256)__builtin_ia32_fixupimmps256_mask( \
      (__v8sf)(__m256)(X), (__v8sf)(__m256)(Y), (__v8si)(__m256i)(Z), \
      (int)(C), (__mmask8)(U)))
#define _mm256_maskz_fixupimm_ps(U, X, Y, Z, C) \
  ((__m256)__builtin_ia32_fixupimmps256_maskz( \
      (__v8sf)(__m256)(X), (__v8sf)(__m256)(Y), (__v8si)(__m256i)(Z), \
      (int)(C), (__mmask8)(U)))
#define _mm_fixupimm_pd(X, Y, Z, C) \
  ((__m128d)__builtin_ia32_fixupimmpd128_mask( \
      (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), \
      (int)(C), (__mmask8)(-1)))
#define _mm_mask_fixupimm_pd(X, U, Y, Z, C) \
  ((__m128d)__builtin_ia32_fixupimmpd128_mask( \
      (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), \
      (int)(C), (__mmask8)(U)))
#define _mm_maskz_fixupimm_pd(U, X, Y, Z, C) \
  ((__m128d)__builtin_ia32_fixupimmpd128_maskz( \
      (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), \
      (int)(C), (__mmask8)(U)))
#define _mm_fixupimm_ps(X, Y, Z, C) \
  ((__m128)__builtin_ia32_fixupimmps128_mask( \
      (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), \
      (int)(C), (__mmask8)(-1)))
#define _mm_mask_fixupimm_ps(X, U, Y, Z, C) \
  ((__m128)__builtin_ia32_fixupimmps128_mask( \
      (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), \
      (int)(C), (__mmask8)(U)))
#define _mm_maskz_fixupimm_ps(U, X, Y, Z, C) \
  ((__m128)__builtin_ia32_fixupimmps128_maskz( \
      (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), \
      (int)(C), (__mmask8)(U)))
#define _mm256_mask_srli_epi32(W, U, A, B) \
  ((__m256i)__builtin_ia32_psrldi256_mask( \
      (__v8si)(__m256i)(A), (int)(B), (__v8si)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_srli_epi32(U, A, B) \
  ((__m256i)__builtin_ia32_psrldi256_mask((__v8si)(__m256i)(A), (int)(B), \
                                          (__v8si)_mm256_setzero_si256(), \
                                          (__mmask8)(U)))
#define _mm_mask_srli_epi32(W, U, A, B) \
  ((__m128i)__builtin_ia32_psrldi128_mask( \
      (__v4si)(__m128i)(A), (int)(B), (__v4si)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_srli_epi32(U, A, B) \
  ((__m128i)__builtin_ia32_psrldi128_mask((__v4si)(__m128i)(A), (int)(B), \
                                          (__v4si)_mm_setzero_si128(), \
                                          (__mmask8)(U)))
#define _mm256_mask_srli_epi64(W, U, A, B) \
  ((__m256i)__builtin_ia32_psrlqi256_mask( \
      (__v4di)(__m256i)(A), (int)(B), (__v4di)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_srli_epi64(U, A, B) \
  ((__m256i)__builtin_ia32_psrlqi256_mask((__v4di)(__m256i)(A), (int)(B), \
                                          (__v4di)_mm256_setzero_si256(), \
                                          (__mmask8)(U)))
#define _mm_mask_srli_epi64(W, U, A, B) \
  ((__m128i)__builtin_ia32_psrlqi128_mask( \
      (__v2di)(__m128i)(A), (int)(B), (__v2di)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_srli_epi64(U, A, B) \
  ((__m128i)__builtin_ia32_psrlqi128_mask((__v2di)(__m128i)(A), (int)(B), \
                                          (__v2di)_mm_setzero_si128(), \
                                          (__mmask8)(U)))
#define _mm256_mask_slli_epi32(W, U, X, C) \
  ((__m256i)__builtin_ia32_pslldi256_mask( \
      (__v8si)(__m256i)(X), (int)(C), (__v8si)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_slli_epi32(U, X, C) \
  ((__m256i)__builtin_ia32_pslldi256_mask( \
      (__v8si)(__m256i)(X), (int)(C), (__v8si)(__m256i)_mm256_setzero_si256(), \
      (__mmask8)(U)))
#define _mm256_mask_slli_epi64(W, U, X, C) \
  ((__m256i)__builtin_ia32_psllqi256_mask( \
      (__v4di)(__m256i)(X), (int)(C), (__v4di)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_slli_epi64(U, X, C) \
  ((__m256i)__builtin_ia32_psllqi256_mask( \
      (__v4di)(__m256i)(X), (int)(C), (__v4di)(__m256i)_mm256_setzero_si256(), \
      (__mmask8)(U)))
#define _mm_mask_slli_epi32(W, U, X, C) \
  ((__m128i)__builtin_ia32_pslldi128_mask( \
      (__v4si)(__m128i)(X), (int)(C), (__v4si)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_slli_epi32(U, X, C) \
  ((__m128i)__builtin_ia32_pslldi128_mask( \
      (__v4si)(__m128i)(X), (int)(C), (__v4si)(__m128i)_mm_setzero_si128(), \
      (__mmask8)(U)))
#define _mm_mask_slli_epi64(W, U, X, C) \
  ((__m128i)__builtin_ia32_psllqi128_mask( \
      (__v2di)(__m128i)(X), (int)(C), (__v2di)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_slli_epi64(U, X, C) \
  ((__m128i)__builtin_ia32_psllqi128_mask( \
      (__v2di)(__m128i)(X), (int)(C), (__v2di)(__m128i)_mm_setzero_si128(), \
      (__mmask8)(U)))
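/* For the ternarylogic intrinsics the immediate I is an 8-bit truth table:
   bit ((a << 2) | (b << 1) | c) of I gives the result for input bits a, b,
   c taken from the three source operands.  */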
#define _mm256_ternarylogic_epi64(A, B, C, I) \
  ((__m256i)__builtin_ia32_pternlogq256_mask( \
      (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \
      (int)(I), (__mmask8)-1))
#define _mm256_mask_ternarylogic_epi64(A, U, B, C, I) \
  ((__m256i)__builtin_ia32_pternlogq256_mask( \
      (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \
      (int)(I), (__mmask8)(U)))
#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, I) \
  ((__m256i)__builtin_ia32_pternlogq256_maskz( \
      (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \
      (int)(I), (__mmask8)(U)))
#define _mm256_ternarylogic_epi32(A, B, C, I) \
  ((__m256i)__builtin_ia32_pternlogd256_mask( \
      (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \
      (int)(I), (__mmask8)-1))
#define _mm256_mask_ternarylogic_epi32(A, U, B, C, I) \
  ((__m256i)__builtin_ia32_pternlogd256_mask( \
      (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \
      (int)(I), (__mmask8)(U)))
#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, I) \
  ((__m256i)__builtin_ia32_pternlogd256_maskz( \
      (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \
      (int)(I), (__mmask8)(U)))
#define _mm_ternarylogic_epi64(A, B, C, I) \
  ((__m128i)__builtin_ia32_pternlogq128_mask( \
      (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \
      (int)(I), (__mmask8)-1))
#define _mm_mask_ternarylogic_epi64(A, U, B, C, I) \
  ((__m128i)__builtin_ia32_pternlogq128_mask( \
      (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \
      (int)(I), (__mmask8)(U)))
#define _mm_maskz_ternarylogic_epi64(U, A, B, C, I) \
  ((__m128i)__builtin_ia32_pternlogq128_maskz( \
      (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \
      (int)(I), (__mmask8)(U)))
#define _mm_ternarylogic_epi32(A, B, C, I) \
  ((__m128i)__builtin_ia32_pternlogd128_mask( \
      (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \
      (int)(I), (__mmask8)-1))
#define _mm_mask_ternarylogic_epi32(A, U, B, C, I) \
  ((__m128i)__builtin_ia32_pternlogd128_mask( \
      (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \
      (int)(I), (__mmask8)(U)))
#define _mm_maskz_ternarylogic_epi32(U, A, B, C, I) \
  ((__m128i)__builtin_ia32_pternlogd128_maskz( \
      (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \
      (int)(I), (__mmask8)(U)))
#define _mm256_roundscale_ps(A, B) \
  ((__m256)__builtin_ia32_rndscaleps_256_mask( \
      (__v8sf)(__m256)(A), (int)(B), (__v8sf)(__m256)_mm256_setzero_ps(), \
      (__mmask8)-1))
#define _mm256_mask_roundscale_ps(W, U, A, B) \
  ((__m256)__builtin_ia32_rndscaleps_256_mask( \
      (__v8sf)(__m256)(A), (int)(B), (__v8sf)(__m256)(W), (__mmask8)(U)))
#define _mm256_maskz_roundscale_ps(U, A, B) \
  ((__m256)__builtin_ia32_rndscaleps_256_mask( \
      (__v8sf)(__m256)(A), (int)(B), (__v8sf)(__m256)_mm256_setzero_ps(), \
      (__mmask8)(U)))
#define _mm256_roundscale_pd(A, B) \
  ((__m256d)__builtin_ia32_rndscalepd_256_mask( \
      (__v4df)(__m256d)(A), (int)(B), (__v4df)(__m256d)_mm256_setzero_pd(), \
      (__mmask8)-1))
#define _mm256_mask_roundscale_pd(W, U, A, B) \
  ((__m256d)__builtin_ia32_rndscalepd_256_mask( \
      (__v4df)(__m256d)(A), (int)(B), (__v4df)(__m256d)(W), (__mmask8)(U)))
#define _mm256_maskz_roundscale_pd(U, A, B) \
  ((__m256d)__builtin_ia32_rndscalepd_256_mask( \
      (__v4df)(__m256d)(A), (int)(B), (__v4df)(__m256d)_mm256_setzero_pd(), \
      (__mmask8)(U)))
#define _mm_roundscale_ps(A, B) \
  ((__m128)__builtin_ia32_rndscaleps_128_mask( \
      (__v4sf)(__m128)(A), (int)(B), (__v4sf)(__m128)_mm_setzero_ps(), \
      (__mmask8)-1))
#define _mm_mask_roundscale_ps(W, U, A, B) \
  ((__m128)__builtin_ia32_rndscaleps_128_mask( \
      (__v4sf)(__m128)(A), (int)(B), (__v4sf)(__m128)(W), (__mmask8)(U)))
#define _mm_maskz_roundscale_ps(U, A, B) \
  ((__m128)__builtin_ia32_rndscaleps_128_mask( \
      (__v4sf)(__m128)(A), (int)(B), (__v4sf)(__m128)_mm_setzero_ps(), \
      (__mmask8)(U)))
#define _mm_roundscale_pd(A, B) \
  ((__m128d)__builtin_ia32_rndscalepd_128_mask( \
      (__v2df)(__m128d)(A), (int)(B), (__v2df)(__m128d)_mm_setzero_pd(), \
      (__mmask8)-1))
#define _mm_mask_roundscale_pd(W, U, A, B) \
  ((__m128d)__builtin_ia32_rndscalepd_128_mask( \
      (__v2df)(__m128d)(A), (int)(B), (__v2df)(__m128d)(W), (__mmask8)(U)))
#define _mm_maskz_roundscale_pd(U, A, B) \
  ((__m128d)__builtin_ia32_rndscalepd_128_mask( \
      (__v2df)(__m128d)(A), (int)(B), (__v2df)(__m128d)_mm_setzero_pd(), \
      (__mmask8)(U)))
#define _mm256_getmant_ps(X, B, C) \
  ((__m256)__builtin_ia32_getmantps256_mask( \
      (__v8sf)(__m256)(X), (int)(((C) << 2) | (B)), \
      (__v8sf)(__m256)_mm256_setzero_ps(), (__mmask8)-1))
#define _mm256_mask_getmant_ps(W, U, X, B, C) \
  ((__m256)__builtin_ia32_getmantps256_mask( \
      (__v8sf)(__m256)(X), (int)(((C) << 2) | (B)), (__v8sf)(__m256)(W), \
      (__mmask8)(U)))
#define _mm256_maskz_getmant_ps(U, X, B, C) \
  ((__m256)__builtin_ia32_getmantps256_mask( \
      (__v8sf)(__m256)(X), (int)(((C) << 2) | (B)), \
      (__v8sf)(__m256)_mm256_setzero_ps(), (__mmask8)(U)))
#define _mm_getmant_ps(X, B, C) \
  ((__m128)__builtin_ia32_getmantps128_mask( \
      (__v4sf)(__m128)(X), (int)(((C) << 2) | (B)), \
      (__v4sf)(__m128)_mm_setzero_ps(), (__mmask8)-1))
#define _mm_mask_getmant_ps(W, U, X, B, C) \
  ((__m128)__builtin_ia32_getmantps128_mask( \
      (__v4sf)(__m128)(X), (int)(((C) << 2) | (B)), (__v4sf)(__m128)(W), \
      (__mmask8)(U)))
#define _mm_maskz_getmant_ps(U, X, B, C) \
  ((__m128)__builtin_ia32_getmantps128_mask( \
      (__v4sf)(__m128)(X), (int)(((C) << 2) | (B)), \
      (__v4sf)(__m128)_mm_setzero_ps(), (__mmask8)(U)))
#define _mm256_getmant_pd(X, B, C) \
  ((__m256d)__builtin_ia32_getmantpd256_mask( \
      (__v4df)(__m256d)(X), (int)(((C) << 2) | (B)), \
      (__v4df)(__m256d)_mm256_setzero_pd(), (__mmask8)-1))
#define _mm256_mask_getmant_pd(W, U, X, B, C) \
  ((__m256d)__builtin_ia32_getmantpd256_mask( \
      (__v4df)(__m256d)(X), (int)(((C) << 2) | (B)), (__v4df)(__m256d)(W), \
      (__mmask8)(U)))
#define _mm256_maskz_getmant_pd(U, X, B, C) \
  ((__m256d)__builtin_ia32_getmantpd256_mask( \
      (__v4df)(__m256d)(X), (int)(((C) << 2) | (B)), \
      (__v4df)(__m256d)_mm256_setzero_pd(), (__mmask8)(U)))
#define _mm_getmant_pd(X, B, C) \
  ((__m128d)__builtin_ia32_getmantpd128_mask( \
      (__v2df)(__m128d)(X), (int)(((C) << 2) | (B)), \
      (__v2df)(__m128d)_mm_setzero_pd(), (__mmask8)-1))
#define _mm_mask_getmant_pd(W, U, X, B, C) \
  ((__m128d)__builtin_ia32_getmantpd128_mask( \
      (__v2df)(__m128d)(X), (int)(((C) << 2) | (B)), (__v2df)(__m128d)(W), \
      (__mmask8)(U)))
#define _mm_maskz_getmant_pd(U, X, B, C) \
  ((__m128d)__builtin_ia32_getmantpd128_mask( \
      (__v2df)(__m128d)(X), (int)(((C) << 2) | (B)), \
      (__v2df)(__m128d)_mm_setzero_pd(), (__mmask8)(U)))
#define _mm256_mmask_i32gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m256) __builtin_ia32_gather3siv8sf( \
      (__v8sf)(__m256)V1OLD, (void const *)ADDR, (__v8si)(__m256i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm_mmask_i32gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m128) __builtin_ia32_gather3siv4sf( \
      (__v4sf)(__m128)V1OLD, (void const *)ADDR, (__v4si)(__m128i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm256_mmask_i32gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m256d) __builtin_ia32_gather3siv4df( \
      (__v4df)(__m256d)V1OLD, (void const *)ADDR, (__v4si)(__m128i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm_mmask_i32gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m128d) __builtin_ia32_gather3siv2df( \
      (__v2df)(__m128d)V1OLD, (void const *)ADDR, (__v4si)(__m128i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm256_mmask_i64gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m128) __builtin_ia32_gather3div8sf( \
      (__v4sf)(__m128)V1OLD, (void const *)ADDR, (__v4di)(__m256i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm_mmask_i64gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m128) __builtin_ia32_gather3div4sf( \
      (__v4sf)(__m128)V1OLD, (void const *)ADDR, (__v2di)(__m128i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm256_mmask_i64gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m256d) __builtin_ia32_gather3div4df( \
      (__v4df)(__m256d)V1OLD, (void const *)ADDR, (__v4di)(__m256i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm_mmask_i64gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m128d) __builtin_ia32_gather3div2df( \
      (__v2df)(__m128d)V1OLD, (void const *)ADDR, (__v2di)(__m128i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm256_mmask_i32gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m256i) __builtin_ia32_gather3siv8si( \
      (__v8si)(__m256i)V1OLD, (void const *)ADDR, (__v8si)(__m256i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm_mmask_i32gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m128i) __builtin_ia32_gather3siv4si( \
      (__v4si)(__m128i)V1OLD, (void const *)ADDR, (__v4si)(__m128i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm256_mmask_i32gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m256i) __builtin_ia32_gather3siv4di( \
      (__v4di)(__m256i)V1OLD, (void const *)ADDR, (__v4si)(__m128i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm_mmask_i32gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m128i) __builtin_ia32_gather3siv2di( \
      (__v2di)(__m128i)V1OLD, (void const *)ADDR, (__v4si)(__m128i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm256_mmask_i64gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m128i) __builtin_ia32_gather3div8si( \
      (__v4si)(__m128i)V1OLD, (void const *)ADDR, (__v4di)(__m256i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm_mmask_i64gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m128i) __builtin_ia32_gather3div4si( \
      (__v4si)(__m128i)V1OLD, (void const *)ADDR, (__v2di)(__m128i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm256_mmask_i64gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m256i) __builtin_ia32_gather3div4di( \
      (__v4di)(__m256i)V1OLD, (void const *)ADDR, (__v4di)(__m256i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm_mmask_i64gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE) \
  (__m128i) __builtin_ia32_gather3div2di( \
      (__v2di)(__m128i)V1OLD, (void const *)ADDR, (__v2di)(__m128i)INDEX, \
      (__mmask8)MASK, (int)SCALE)
#define _mm256_i32scatter_ps(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv8sf((void *)ADDR, (__mmask8)0xFF, \
                               (__v8si)(__m256i)INDEX, (__v8sf)(__m256)V1, \
                               (int)SCALE)
#define _mm256_mask_i32scatter_ps(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv8sf((void *)ADDR, (__mmask8)MASK, \
                               (__v8si)(__m256i)INDEX, (__v8sf)(__m256)V1, \
                               (int)SCALE)
#define _mm_i32scatter_ps(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv4sf((void *)ADDR, (__mmask8)0xFF, \
                               (__v4si)(__m128i)INDEX, (__v4sf)(__m128)V1, \
                               (int)SCALE)
#define _mm_mask_i32scatter_ps(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv4sf((void *)ADDR, (__mmask8)MASK, \
                               (__v4si)(__m128i)INDEX, (__v4sf)(__m128)V1, \
                               (int)SCALE)
#define _mm256_i32scatter_pd(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv4df((void *)ADDR, (__mmask8)0xFF, \
                               (__v4si)(__m128i)INDEX, (__v4df)(__m256d)V1, \
                               (int)SCALE)
#define _mm256_mask_i32scatter_pd(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv4df((void *)ADDR, (__mmask8)MASK, \
                               (__v4si)(__m128i)INDEX, (__v4df)(__m256d)V1, \
                               (int)SCALE)
#define _mm_i32scatter_pd(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv2df((void *)ADDR, (__mmask8)0xFF, \
                               (__v4si)(__m128i)INDEX, (__v2df)(__m128d)V1, \
                               (int)SCALE)
#define _mm_mask_i32scatter_pd(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv2df((void *)ADDR, (__mmask8)MASK, \
                               (__v4si)(__m128i)INDEX, (__v2df)(__m128d)V1, \
                               (int)SCALE)
#define _mm256_i64scatter_ps(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv8sf((void *)ADDR, (__mmask8)0xFF, \
                               (__v4di)(__m256i)INDEX, (__v4sf)(__m128)V1, \
                               (int)SCALE)
#define _mm256_mask_i64scatter_ps(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv8sf((void *)ADDR, (__mmask8)MASK, \
                               (__v4di)(__m256i)INDEX, (__v4sf)(__m128)V1, \
                               (int)SCALE)
#define _mm_i64scatter_ps(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv4sf((void *)ADDR, (__mmask8)0xFF, \
                               (__v2di)(__m128i)INDEX, (__v4sf)(__m128)V1, \
                               (int)SCALE)
#define _mm_mask_i64scatter_ps(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv4sf((void *)ADDR, (__mmask8)MASK, \
                               (__v2di)(__m128i)INDEX, (__v4sf)(__m128)V1, \
                               (int)SCALE)
#define _mm256_i64scatter_pd(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv4df((void *)ADDR, (__mmask8)0xFF, \
                               (__v4di)(__m256i)INDEX, (__v4df)(__m256d)V1, \
                               (int)SCALE)
#define _mm256_mask_i64scatter_pd(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv4df((void *)ADDR, (__mmask8)MASK, \
                               (__v4di)(__m256i)INDEX, (__v4df)(__m256d)V1, \
                               (int)SCALE)
#define _mm_i64scatter_pd(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv2df((void *)ADDR, (__mmask8)0xFF, \
                               (__v2di)(__m128i)INDEX, (__v2df)(__m128d)V1, \
                               (int)SCALE)
#define _mm_mask_i64scatter_pd(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv2df((void *)ADDR, (__mmask8)MASK, \
                               (__v2di)(__m128i)INDEX, (__v2df)(__m128d)V1, \
                               (int)SCALE)
#define _mm256_i32scatter_epi32(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv8si((void *)ADDR, (__mmask8)0xFF, \
                               (__v8si)(__m256i)INDEX, (__v8si)(__m256i)V1, \
                               (int)SCALE)
#define _mm256_mask_i32scatter_epi32(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv8si((void *)ADDR, (__mmask8)MASK, \
                               (__v8si)(__m256i)INDEX, (__v8si)(__m256i)V1, \
                               (int)SCALE)
#define _mm_i32scatter_epi32(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv4si((void *)ADDR, (__mmask8)0xFF, \
                               (__v4si)(__m128i)INDEX, (__v4si)(__m128i)V1, \
                               (int)SCALE)
#define _mm_mask_i32scatter_epi32(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv4si((void *)ADDR, (__mmask8)MASK, \
                               (__v4si)(__m128i)INDEX, (__v4si)(__m128i)V1, \
                               (int)SCALE)
#define _mm256_i32scatter_epi64(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv4di((void *)ADDR, (__mmask8)0xFF, \
                               (__v4si)(__m128i)INDEX, (__v4di)(__m256i)V1, \
                               (int)SCALE)
#define _mm256_mask_i32scatter_epi64(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv4di((void *)ADDR, (__mmask8)MASK, \
                               (__v4si)(__m128i)INDEX, (__v4di)(__m256i)V1, \
                               (int)SCALE)
#define _mm_i32scatter_epi64(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv2di((void *)ADDR, (__mmask8)0xFF, \
                               (__v4si)(__m128i)INDEX, (__v2di)(__m128i)V1, \
                               (int)SCALE)
#define _mm_mask_i32scatter_epi64(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scattersiv2di((void *)ADDR, (__mmask8)MASK, \
                               (__v4si)(__m128i)INDEX, (__v2di)(__m128i)V1, \
                               (int)SCALE)
#define _mm256_i64scatter_epi32(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv8si((void *)ADDR, (__mmask8)0xFF, \
                               (__v4di)(__m256i)INDEX, (__v4si)(__m128i)V1, \
                               (int)SCALE)
#define _mm256_mask_i64scatter_epi32(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv8si((void *)ADDR, (__mmask8)MASK, \
                               (__v4di)(__m256i)INDEX, (__v4si)(__m128i)V1, \
                               (int)SCALE)
#define _mm_i64scatter_epi32(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv4si((void *)ADDR, (__mmask8)0xFF, \
                               (__v2di)(__m128i)INDEX, (__v4si)(__m128i)V1, \
                               (int)SCALE)
#define _mm_mask_i64scatter_epi32(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv4si((void *)ADDR, (__mmask8)MASK, \
                               (__v2di)(__m128i)INDEX, (__v4si)(__m128i)V1, \
                               (int)SCALE)
#define _mm256_i64scatter_epi64(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv4di((void *)ADDR, (__mmask8)0xFF, \
                               (__v4di)(__m256i)INDEX, (__v4di)(__m256i)V1, \
                               (int)SCALE)
#define _mm256_mask_i64scatter_epi64(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv4di((void *)ADDR, (__mmask8)MASK, \
                               (__v4di)(__m256i)INDEX, (__v4di)(__m256i)V1, \
                               (int)SCALE)
#define _mm_i64scatter_epi64(ADDR, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv2di((void *)ADDR, (__mmask8)0xFF, \
                               (__v2di)(__m128i)INDEX, (__v2di)(__m128i)V1, \
                               (int)SCALE)
#define _mm_mask_i64scatter_epi64(ADDR, MASK, INDEX, V1, SCALE) \
  __builtin_ia32_scatterdiv2di((void *)ADDR, (__mmask8)MASK, \
                               (__v2di)(__m128i)INDEX, (__v2di)(__m128i)V1, \
                               (int)SCALE)
#define _mm256_mask_shuffle_epi32(W, U, X, C) \
  ((__m256i)__builtin_ia32_pshufd256_mask( \
      (__v8si)(__m256i)(X), (int)(C), (__v8si)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_shuffle_epi32(U, X, C) \
  ((__m256i)__builtin_ia32_pshufd256_mask( \
      (__v8si)(__m256i)(X), (int)(C), (__v8si)(__m256i)_mm256_setzero_si256(), \
      (__mmask8)(U)))
#define _mm_mask_shuffle_epi32(W, U, X, C) \
  ((__m128i)__builtin_ia32_pshufd128_mask( \
      (__v4si)(__m128i)(X), (int)(C), (__v4si)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_shuffle_epi32(U, X, C) \
  ((__m128i)__builtin_ia32_pshufd128_mask( \
      (__v4si)(__m128i)(X), (int)(C), (__v4si)(__m128i)_mm_setzero_si128(), \
      (__mmask8)(U)))
#define _mm256_rol_epi64(A, B) \
  ((__m256i)__builtin_ia32_prolq256_mask( \
      (__v4di)(__m256i)(A), (int)(B), (__v4di)(__m256i)_mm256_setzero_si256(), \
      (__mmask8)-1))
#define _mm256_mask_rol_epi64(W, U, A, B) \
  ((__m256i)__builtin_ia32_prolq256_mask((__v4di)(__m256i)(A), (int)(B), \
                                         (__v4di)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_rol_epi64(U, A, B) \
  ((__m256i)__builtin_ia32_prolq256_mask( \
      (__v4di)(__m256i)(A), (int)(B), (__v4di)(__m256i)_mm256_setzero_si256(), \
      (__mmask8)(U)))
#define _mm_rol_epi64(A, B) \
  ((__m128i)__builtin_ia32_prolq128_mask((__v2di)(__m128i)(A), (int)(B), \
                                         (__v2di)(__m128i)_mm_setzero_si128(), \
                                         (__mmask8)-1))
#define _mm_mask_rol_epi64(W, U, A, B) \
  ((__m128i)__builtin_ia32_prolq128_mask((__v2di)(__m128i)(A), (int)(B), \
                                         (__v2di)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_rol_epi64(U, A, B) \
  ((__m128i)__builtin_ia32_prolq128_mask((__v2di)(__m128i)(A), (int)(B), \
                                         (__v2di)(__m128i)_mm_setzero_si128(), \
                                         (__mmask8)(U)))
#define _mm256_ror_epi64(A, B) \
  ((__m256i)__builtin_ia32_prorq256_mask( \
      (__v4di)(__m256i)(A), (int)(B), (__v4di)(__m256i)_mm256_setzero_si256(), \
      (__mmask8)-1))
#define _mm256_mask_ror_epi64(W, U, A, B) \
  ((__m256i)__builtin_ia32_prorq256_mask((__v4di)(__m256i)(A), (int)(B), \
                                         (__v4di)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_ror_epi64(U, A, B) \
  ((__m256i)__builtin_ia32_prorq256_mask( \
      (__v4di)(__m256i)(A), (int)(B), (__v4di)(__m256i)_mm256_setzero_si256(), \
      (__mmask8)(U)))
#define _mm_ror_epi64(A, B) \
  ((__m128i)__builtin_ia32_prorq128_mask((__v2di)(__m128i)(A), (int)(B), \
                                         (__v2di)(__m128i)_mm_setzero_si128(), \
                                         (__mmask8)-1))
#define _mm_mask_ror_epi64(W, U, A, B) \
  ((__m128i)__builtin_ia32_prorq128_mask((__v2di)(__m128i)(A), (int)(B), \
                                         (__v2di)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_ror_epi64(U, A, B) \
  ((__m128i)__builtin_ia32_prorq128_mask((__v2di)(__m128i)(A), (int)(B), \
                                         (__v2di)(__m128i)_mm_setzero_si128(), \
                                         (__mmask8)(U)))
#define _mm256_rol_epi32(A, B) \
  ((__m256i)__builtin_ia32_prold256_mask( \
      (__v8si)(__m256i)(A), (int)(B), (__v8si)(__m256i)_mm256_setzero_si256(), \
      (__mmask8)-1))
#define _mm256_mask_rol_epi32(W, U, A, B) \
  ((__m256i)__builtin_ia32_prold256_mask((__v8si)(__m256i)(A), (int)(B), \
                                         (__v8si)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_rol_epi32(U, A, B) \
  ((__m256i)__builtin_ia32_prold256_mask( \
      (__v8si)(__m256i)(A), (int)(B), (__v8si)(__m256i)_mm256_setzero_si256(), \
      (__mmask8)(U)))
#define _mm_rol_epi32(A, B) \
  ((__m128i)__builtin_ia32_prold128_mask((__v4si)(__m128i)(A), (int)(B), \
                                         (__v4si)(__m128i)_mm_setzero_si128(), \
                                         (__mmask8)-1))
#define _mm_mask_rol_epi32(W, U, A, B) \
  ((__m128i)__builtin_ia32_prold128_mask((__v4si)(__m128i)(A), (int)(B), \
                                         (__v4si)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_rol_epi32(U, A, B) \
  ((__m128i)__builtin_ia32_prold128_mask((__v4si)(__m128i)(A), (int)(B), \
                                         (__v4si)(__m128i)_mm_setzero_si128(), \
                                         (__mmask8)(U)))
#define _mm256_ror_epi32(A, B) \
  ((__m256i)__builtin_ia32_prord256_mask( \
      (__v8si)(__m256i)(A), (int)(B), (__v8si)(__m256i)_mm256_setzero_si256(), \
      (__mmask8)-1))
#define _mm256_mask_ror_epi32(W, U, A, B) \
  ((__m256i)__builtin_ia32_prord256_mask((__v8si)(__m256i)(A), (int)(B), \
                                         (__v8si)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_ror_epi32(U, A, B) \
  ((__m256i)__builtin_ia32_prord256_mask( \
      (__v8si)(__m256i)(A), (int)(B), (__v8si)(__m256i)_mm256_setzero_si256(), \
      (__mmask8)(U)))
#define _mm_ror_epi32(A, B) \
  ((__m128i)__builtin_ia32_prord128_mask((__v4si)(__m128i)(A), (int)(B), \
                                         (__v4si)(__m128i)_mm_setzero_si128(), \
                                         (__mmask8)-1))
#define _mm_mask_ror_epi32(W, U, A, B) \
  ((__m128i)__builtin_ia32_prord128_mask((__v4si)(__m128i)(A), (int)(B), \
                                         (__v4si)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_ror_epi32(U, A, B) \
  ((__m128i)__builtin_ia32_prord128_mask((__v4si)(__m128i)(A), (int)(B), \
                                         (__v4si)(__m128i)_mm_setzero_si128(), \
                                         (__mmask8)(U)))
#define _mm256_alignr_epi32(X, Y, C) \
  ((__m256i)__builtin_ia32_alignd256_mask((__v8si)(__m256i)(X), \
                                          (__v8si)(__m256i)(Y), (int)(C), \
                                          (__v8si)(__m256i)(X), (__mmask8)-1))
#define _mm256_mask_alignr_epi32(W, U, X, Y, C) \
  ((__m256i)__builtin_ia32_alignd256_mask( \
      (__v8si)(__m256i)(X), (__v8si)(__m256i)(Y), (int)(C), \
      (__v8si)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_alignr_epi32(U, X, Y, C) \
  ((__m256i)__builtin_ia32_alignd256_mask( \
      (__v8si)(__m256i)(X), (__v8si)(__m256i)(Y), (int)(C), \
      (__v8si)(__m256i)_mm256_setzero_si256(), (__mmask8)(U)))
#define _mm256_alignr_epi64(X, Y, C) \
  ((__m256i)__builtin_ia32_alignq256_mask((__v4di)(__m256i)(X), \
                                          (__v4di)(__m256i)(Y), (int)(C), \
                                          (__v4di)(__m256i)(X), (__mmask8)-1))
#define _mm256_mask_alignr_epi64(W, U, X, Y, C) \
  ((__m256i)__builtin_ia32_alignq256_mask( \
      (__v4di)(__m256i)(X), (__v4di)(__m256i)(Y), (int)(C), \
      (__v4di)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_alignr_epi64(U, X, Y, C) \
  ((__m256i)__builtin_ia32_alignq256_mask( \
      (__v4di)(__m256i)(X), (__v4di)(__m256i)(Y), (int)(C), \
      (__v4di)(__m256i)_mm256_setzero_si256(), (__mmask8)(U)))
#define _mm_alignr_epi32(X, Y, C) \
  ((__m128i)__builtin_ia32_alignd128_mask((__v4si)(__m128i)(X), \
                                          (__v4si)(__m128i)(Y), (int)(C), \
                                          (__v4si)(__m128i)(X), (__mmask8)-1))
#define _mm_mask_alignr_epi32(W, U, X, Y, C) \
  ((__m128i)__builtin_ia32_alignd128_mask( \
      (__v4si)(__m128i)(X), (__v4si)(__m128i)(Y), (int)(C), \
      (__v4si)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_alignr_epi32(U, X, Y, C) \
  ((__m128i)__builtin_ia32_alignd128_mask( \
      (__v4si)(__m128i)(X), (__v4si)(__m128i)(Y), (int)(C), \
      (__v4si)(__m128i)_mm_setzero_si128(), (__mmask8)(U)))
#define _mm_alignr_epi64(X, Y, C) \
  ((__m128i)__builtin_ia32_alignq128_mask((__v2di)(__m128i)(X), \
                                          (__v2di)(__m128i)(Y), (int)(C), \
                                          (__v2di)(__m128i)(X), (__mmask8)-1))
#define _mm_mask_alignr_epi64(W, U, X, Y, C) \
  ((__m128i)__builtin_ia32_alignq128_mask( \
      (__v2di)(__m128i)(X), (__v2di)(__m128i)(Y), (int)(C), \
      (__v2di)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_alignr_epi64(U, X, Y, C) \
  ((__m128i)__builtin_ia32_alignq128_mask( \
      (__v2di)(__m128i)(X), (__v2di)(__m128i)(Y), (int)(C), \
      (__v2di)(__m128i)_mm_setzero_si128(), (__mmask8)(U)))
#define _mm_mask_cvtps_ph(W, U, A, I) \
  ((__m128i)__builtin_ia32_vcvtps2ph_mask( \
      (__v4sf)(__m128)(A), (int)(I), (__v8hi)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_cvtps_ph(U, A, I) \
  ((__m128i)__builtin_ia32_vcvtps2ph_mask( \
      (__v4sf)(__m128)(A), (int)(I), (__v8hi)(__m128i)_mm_setzero_si128(), \
      (__mmask8)(U)))
#define _mm256_mask_cvtps_ph(W, U, A, I) \
  ((__m128i)__builtin_ia32_vcvtps2ph256_mask( \
      (__v8sf)(__m256)(A), (int)(I), (__v8hi)(__m128i)(W), (__mmask8)(U)))
#define _mm256_maskz_cvtps_ph(U, A, I) \
  ((__m128i)__builtin_ia32_vcvtps2ph256_mask( \
      (__v8sf)(__m256)(A), (int)(I), (__v8hi)(__m128i)_mm_setzero_si128(), \
      (__mmask8)(U)))
#define _mm256_mask_srai_epi32(W, U, A, B) \
  ((__m256i)__builtin_ia32_psradi256_mask( \
      (__v8si)(__m256i)(A), (int)(B), (__v8si)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_srai_epi32(U, A, B) \
  ((__m256i)__builtin_ia32_psradi256_mask((__v8si)(__m256i)(A), (int)(B), \
                                          (__v8si)_mm256_setzero_si256(), \
                                          (__mmask8)(U)))
#define _mm_mask_srai_epi32(W, U, A, B) \
  ((__m128i)__builtin_ia32_psradi128_mask( \
      (__v4si)(__m128i)(A), (int)(B), (__v4si)(__m128i)(W), (__mmask8)(U)))
#define _mm_maskz_srai_epi32(U, A, B) \
  ((__m128i)__builtin_ia32_psradi128_mask((__v4si)(__m128i)(A), (int)(B), \
                                          (__v4si)_mm_setzero_si128(), \
                                          (__mmask8)(U)))
#define _mm256_srai_epi64(A, B) \
  ((__m256i)__builtin_ia32_psraqi256_mask((__v4di)(__m256i)(A), (int)(B), \
                                          (__v4di)_mm256_setzero_si256(), \
                                          (__mmask8)-1))
#define _mm256_mask_srai_epi64(W, U, A, B) \
  ((__m256i)__builtin_ia32_psraqi256_mask( \
      (__v4di)(__m256i)(A), (int)(B), (__v4di)(__m256i)(W), (__mmask8)(U)))
#define _mm256_maskz_srai_epi64(U, A, B) \
  ((__m256i)__builtin_ia32_psraqi256_mask((__v4di)(__m256i)(A), (int)(B), \
                                          (__v4di)_mm256_setzero_si256(), \
                                          (__mmask8)(U)))
#define _mm_srai_epi64(A, B) \
  ((__m128i)__builtin_ia32_psraqi128_mask((__v2di)(__m128i)(A), (int)(B), \
                                          (__v2di)_mm_setzero_si128(), \
                                          (__mmask8)-1))
(__v2di)(__m128i)(W), (__mmask8)(U))) #define _mm_maskz_srai_epi64(U, A, B) \ ((__m128i)__builtin_ia32_psraqi128_mask((__v2di)(__m128i)(A), (int)(B), \ (__v2di)_mm_setzero_si128(), \ (__mmask8)(U))) #define _mm256_mask_permutex_pd(W, U, A, B) \ ((__m256d)__builtin_ia32_permdf256_mask( \ (__v4df)(__m256d)(A), (int)(B), (__v4df)(__m256d)(W), (__mmask8)(U))) #define _mm256_maskz_permutex_pd(U, A, B) \ ((__m256d)__builtin_ia32_permdf256_mask( \ (__v4df)(__m256d)(A), (int)(B), (__v4df)(__m256d)_mm256_setzero_pd(), \ (__mmask8)(U))) #define _mm256_mask_permute_pd(W, U, X, C) \ ((__m256d)__builtin_ia32_vpermilpd256_mask( \ (__v4df)(__m256d)(X), (int)(C), (__v4df)(__m256d)(W), (__mmask8)(U))) #define _mm256_maskz_permute_pd(U, X, C) \ ((__m256d)__builtin_ia32_vpermilpd256_mask( \ (__v4df)(__m256d)(X), (int)(C), (__v4df)(__m256d)_mm256_setzero_pd(), \ (__mmask8)(U))) #define _mm256_mask_permute_ps(W, U, X, C) \ ((__m256)__builtin_ia32_vpermilps256_mask( \ (__v8sf)(__m256)(X), (int)(C), (__v8sf)(__m256)(W), (__mmask8)(U))) #define _mm256_maskz_permute_ps(U, X, C) \ ((__m256)__builtin_ia32_vpermilps256_mask( \ (__v8sf)(__m256)(X), (int)(C), (__v8sf)(__m256)_mm256_setzero_ps(), \ (__mmask8)(U))) #define _mm_mask_permute_pd(W, U, X, C) \ ((__m128d)__builtin_ia32_vpermilpd_mask( \ (__v2df)(__m128d)(X), (int)(C), (__v2df)(__m128d)(W), (__mmask8)(U))) #define _mm_maskz_permute_pd(U, X, C) \ ((__m128d)__builtin_ia32_vpermilpd_mask((__v2df)(__m128d)(X), (int)(C), \ (__v2df)(__m128d)_mm_setzero_pd(), \ (__mmask8)(U))) #define _mm_mask_permute_ps(W, U, X, C) \ ((__m128)__builtin_ia32_vpermilps_mask((__v4sf)(__m128)(X), (int)(C), \ (__v4sf)(__m128)(W), (__mmask8)(U))) #define _mm_maskz_permute_ps(U, X, C) \ ((__m128)__builtin_ia32_vpermilps_mask((__v4sf)(__m128)(X), (int)(C), \ (__v4sf)(__m128)_mm_setzero_ps(), \ (__mmask8)(U))) #define _mm256_mask_blend_pd(__U, __A, __W) \ ((__m256d)__builtin_ia32_blendmpd_256_mask((__v4df)(__A), (__v4df)(__W), \ (__mmask8)(__U))) #define _mm256_mask_blend_ps(__U, __A, __W) \ ((__m256)__builtin_ia32_blendmps_256_mask((__v8sf)(__A), (__v8sf)(__W), \ (__mmask8)(__U))) #define _mm256_mask_blend_epi64(__U, __A, __W) \ ((__m256i)__builtin_ia32_blendmq_256_mask((__v4di)(__A), (__v4di)(__W), \ (__mmask8)(__U))) #define _mm256_mask_blend_epi32(__U, __A, __W) \ ((__m256i)__builtin_ia32_blendmd_256_mask((__v8si)(__A), (__v8si)(__W), \ (__mmask8)(__U))) #define _mm_mask_blend_pd(__U, __A, __W) \ ((__m128d)__builtin_ia32_blendmpd_128_mask((__v2df)(__A), (__v2df)(__W), \ (__mmask8)(__U))) #define _mm_mask_blend_ps(__U, __A, __W) \ ((__m128)__builtin_ia32_blendmps_128_mask((__v4sf)(__A), (__v4sf)(__W), \ (__mmask8)(__U))) #define _mm_mask_blend_epi64(__U, __A, __W) \ ((__m128i)__builtin_ia32_blendmq_128_mask((__v2di)(__A), (__v2di)(__W), \ (__mmask8)(__U))) #define _mm_mask_blend_epi32(__U, __A, __W) \ ((__m128i)__builtin_ia32_blendmd_128_mask((__v4si)(__A), (__v4si)(__W), \ (__mmask8)(__U))) #define _mm256_cmp_epu32_mask(X, Y, P) \ ((__mmask8)__builtin_ia32_ucmpd256_mask( \ (__v8si)(__m256i)(X), (__v8si)(__m256i)(Y), (int)(P), (__mmask8)-1)) #define _mm256_cmp_epi64_mask(X, Y, P) \ ((__mmask8)__builtin_ia32_cmpq256_mask( \ (__v4di)(__m256i)(X), (__v4di)(__m256i)(Y), (int)(P), (__mmask8)-1)) #define _mm256_cmp_epi32_mask(X, Y, P) \ ((__mmask8)__builtin_ia32_cmpd256_mask( \ (__v8si)(__m256i)(X), (__v8si)(__m256i)(Y), (int)(P), (__mmask8)-1)) #define _mm256_cmp_epu64_mask(X, Y, P) \ ((__mmask8)__builtin_ia32_ucmpq256_mask( \ (__v4di)(__m256i)(X), (__v4di)(__m256i)(Y), (int)(P), 
(__mmask8)-1)) #define _mm256_cmp_pd_mask(X, Y, P) \ ((__mmask8)__builtin_ia32_cmppd256_mask( \ (__v4df)(__m256d)(X), (__v4df)(__m256d)(Y), (int)(P), (__mmask8)-1)) #define _mm256_cmp_ps_mask(X, Y, P) \ ((__mmask8)__builtin_ia32_cmpps256_mask( \ (__v8sf)(__m256)(X), (__v8sf)(__m256)(Y), (int)(P), (__mmask8)-1)) #define _mm256_mask_cmp_epi64_mask(M, X, Y, P) \ ((__mmask8)__builtin_ia32_cmpq256_mask( \ (__v4di)(__m256i)(X), (__v4di)(__m256i)(Y), (int)(P), (__mmask8)(M))) #define _mm256_mask_cmp_epi32_mask(M, X, Y, P) \ ((__mmask8)__builtin_ia32_cmpd256_mask( \ (__v8si)(__m256i)(X), (__v8si)(__m256i)(Y), (int)(P), (__mmask8)(M))) #define _mm256_mask_cmp_epu64_mask(M, X, Y, P) \ ((__mmask8)__builtin_ia32_ucmpq256_mask( \ (__v4di)(__m256i)(X), (__v4di)(__m256i)(Y), (int)(P), (__mmask8)(M))) #define _mm256_mask_cmp_epu32_mask(M, X, Y, P) \ ((__mmask8)__builtin_ia32_ucmpd256_mask( \ (__v8si)(__m256i)(X), (__v8si)(__m256i)(Y), (int)(P), (__mmask8)(M))) #define _mm256_mask_cmp_pd_mask(M, X, Y, P) \ ((__mmask8)__builtin_ia32_cmppd256_mask( \ (__v4df)(__m256d)(X), (__v4df)(__m256d)(Y), (int)(P), (__mmask8)(M))) #define _mm256_mask_cmp_ps_mask(M, X, Y, P) \ ((__mmask8)__builtin_ia32_cmpps256_mask( \ (__v8sf)(__m256)(X), (__v8sf)(__m256)(Y), (int)(P), (__mmask8)(M))) #define _mm_cmp_epi64_mask(X, Y, P) \ ((__mmask8)__builtin_ia32_cmpq128_mask( \ (__v2di)(__m128i)(X), (__v2di)(__m128i)(Y), (int)(P), (__mmask8)-1)) #define _mm_cmp_epi32_mask(X, Y, P) \ ((__mmask8)__builtin_ia32_cmpd128_mask( \ (__v4si)(__m128i)(X), (__v4si)(__m128i)(Y), (int)(P), (__mmask8)-1)) #define _mm_cmp_epu64_mask(X, Y, P) \ ((__mmask8)__builtin_ia32_ucmpq128_mask( \ (__v2di)(__m128i)(X), (__v2di)(__m128i)(Y), (int)(P), (__mmask8)-1)) #define _mm_cmp_epu32_mask(X, Y, P) \ ((__mmask8)__builtin_ia32_ucmpd128_mask( \ (__v4si)(__m128i)(X), (__v4si)(__m128i)(Y), (int)(P), (__mmask8)-1)) #define _mm_cmp_pd_mask(X, Y, P) \ ((__mmask8)__builtin_ia32_cmppd128_mask( \ (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (int)(P), (__mmask8)-1)) #define _mm_cmp_ps_mask(X, Y, P) \ ((__mmask8)__builtin_ia32_cmpps128_mask( \ (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (int)(P), (__mmask8)-1)) #define _mm_mask_cmp_epi64_mask(M, X, Y, P) \ ((__mmask8)__builtin_ia32_cmpq128_mask( \ (__v2di)(__m128i)(X), (__v2di)(__m128i)(Y), (int)(P), (__mmask8)(M))) #define _mm_mask_cmp_epi32_mask(M, X, Y, P) \ ((__mmask8)__builtin_ia32_cmpd128_mask( \ (__v4si)(__m128i)(X), (__v4si)(__m128i)(Y), (int)(P), (__mmask8)(M))) #define _mm_mask_cmp_epu64_mask(M, X, Y, P) \ ((__mmask8)__builtin_ia32_ucmpq128_mask( \ (__v2di)(__m128i)(X), (__v2di)(__m128i)(Y), (int)(P), (__mmask8)(M))) #define _mm_mask_cmp_epu32_mask(M, X, Y, P) \ ((__mmask8)__builtin_ia32_ucmpd128_mask( \ (__v4si)(__m128i)(X), (__v4si)(__m128i)(Y), (int)(P), (__mmask8)(M))) #define _mm_mask_cmp_pd_mask(M, X, Y, P) \ ((__mmask8)__builtin_ia32_cmppd128_mask( \ (__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), (int)(P), (__mmask8)(M))) #define _mm_mask_cmp_ps_mask(M, X, Y, P) \ ((__mmask8)__builtin_ia32_cmpps128_mask( \ (__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (int)(P), (__mmask8)(M))) #endif #define _mm256_permutexvar_ps(A, B) _mm256_permutevar8x32_ps((B), (A)) #ifdef __DISABLE_AVX512VL__ #undef __DISABLE_AVX512VL__ #pragma GCC pop_options #endif /* __DISABLE_AVX512VL__ */ #endif /* _AVX512VLINTRIN_H_INCLUDED */
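/*
 * Illustrative sketch, not part of the original header: combining the
 * rotate and compare macros above with a write mask. The function name
 * is hypothetical, predicate 1 is _MM_CMPINT_LT, and the code would need
 * to be compiled with -mavx512vl; it is fenced off with #if 0 so the
 * header itself is unaffected.
 */
#if 0
__m256i demo_masked_rotate(__m256i a, __m256i b) {
  /* keep each 32-bit lane of a, rotated right by 3, only where a[i] < b[i];
     all other lanes are zeroed by the maskz form */
  __mmask8 k = _mm256_cmp_epi32_mask(a, b, 1);
  return _mm256_maskz_ror_epi32(k, a, 3);
}
#endif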
355,708
8,045
jart/cosmopolitan
false
cosmopolitan/third_party/intel/avx512vpopcntdqvlintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED #error \ "Never use <avx512vpopcntdqvlintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _AVX512VPOPCNTDQVLINTRIN_H_INCLUDED #define _AVX512VPOPCNTDQVLINTRIN_H_INCLUDED #if !defined(__AVX512VPOPCNTDQ__) || !defined(__AVX512VL__) #pragma GCC push_options #pragma GCC target("avx512vpopcntdq,avx512vl") #define __DISABLE_AVX512VPOPCNTDQVL__ #endif /* __AVX512VPOPCNTDQVL__ */ __funline __m128i _mm_popcnt_epi32(__m128i __A) { return (__m128i)__builtin_ia32_vpopcountd_v4si((__v4si)__A); } __funline __m128i _mm_mask_popcnt_epi32(__m128i __A, __mmask16 __U, __m128i __B) { return (__m128i)__builtin_ia32_vpopcountd_v4si_mask((__v4si)__A, (__v4si)__B, (__mmask16)__U); } __funline __m128i _mm_maskz_popcnt_epi32(__mmask16 __U, __m128i __A) { return (__m128i)__builtin_ia32_vpopcountd_v4si_mask( (__v4si)__A, (__v4si)_mm_setzero_si128(), (__mmask16)__U); } __funline __m256i _mm256_popcnt_epi32(__m256i __A) { return (__m256i)__builtin_ia32_vpopcountd_v8si((__v8si)__A); } __funline __m256i _mm256_mask_popcnt_epi32(__m256i __A, __mmask16 __U, __m256i __B) { return (__m256i)__builtin_ia32_vpopcountd_v8si_mask((__v8si)__A, (__v8si)__B, (__mmask16)__U); } __funline __m256i _mm256_maskz_popcnt_epi32(__mmask16 __U, __m256i __A) { return (__m256i)__builtin_ia32_vpopcountd_v8si_mask( (__v8si)__A, (__v8si)_mm256_setzero_si256(), (__mmask16)__U); } __funline __m128i _mm_popcnt_epi64(__m128i __A) { return (__m128i)__builtin_ia32_vpopcountq_v2di((__v2di)__A); } __funline __m128i _mm_mask_popcnt_epi64(__m128i __A, __mmask8 __U, __m128i __B) { return (__m128i)__builtin_ia32_vpopcountq_v2di_mask((__v2di)__A, (__v2di)__B, (__mmask8)__U); } __funline __m128i _mm_maskz_popcnt_epi64(__mmask8 __U, __m128i __A) { return (__m128i)__builtin_ia32_vpopcountq_v2di_mask( (__v2di)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U); } __funline __m256i _mm256_popcnt_epi64(__m256i __A) { return (__m256i)__builtin_ia32_vpopcountq_v4di((__v4di)__A); } __funline __m256i _mm256_mask_popcnt_epi64(__m256i __A, __mmask8 __U, __m256i __B) { return (__m256i)__builtin_ia32_vpopcountq_v4di_mask((__v4di)__A, (__v4di)__B, (__mmask8)__U); } __funline __m256i _mm256_maskz_popcnt_epi64(__mmask8 __U, __m256i __A) { return (__m256i)__builtin_ia32_vpopcountq_v4di_mask( (__v4di)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); } #ifdef __DISABLE_AVX512VPOPCNTDQVL__ #undef __DISABLE_AVX512VPOPCNTDQVL__ #pragma GCC pop_options #endif /* __DISABLE_AVX512VPOPCNTDQVL__ */ #endif /* _AVX512VPOPCNTDQVLINTRIN_H_INCLUDED */
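/*
 * Usage sketch, added for illustration and not part of the original
 * header: per-lane population counts with and without a zeroing mask.
 * The function name is hypothetical; building it would need
 * -mavx512vpopcntdq -mavx512vl (plus AVX2 for the final add), so it is
 * fenced off with #if 0.
 */
#if 0
__m256i demo_popcnt(__m256i v) {
  __m256i all = _mm256_popcnt_epi64(v);             /* count bits in every lane */
  __m256i even = _mm256_maskz_popcnt_epi64(0x5, v); /* lanes 0 and 2 only */
  return _mm256_add_epi64(all, even);
}
#endif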
2,909
79
jart/cosmopolitan
false
cosmopolitan/third_party/intel/shaintrin.internal.h
#ifndef _IMMINTRIN_H_INCLUDED #error "Never use <shaintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _SHAINTRIN_H_INCLUDED #define _SHAINTRIN_H_INCLUDED #ifndef __SHA__ #pragma GCC push_options #pragma GCC target("sha") #define __DISABLE_SHA__ #endif /* __SHA__ */ __funline __m128i _mm_sha1msg1_epu32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_sha1msg1((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_sha1msg2_epu32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_sha1msg2((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_sha1nexte_epu32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_sha1nexte((__v4si)__A, (__v4si)__B); } #ifdef __OPTIMIZE__ __funline __m128i _mm_sha1rnds4_epu32(__m128i __A, __m128i __B, const int __I) { return (__m128i)__builtin_ia32_sha1rnds4((__v4si)__A, (__v4si)__B, __I); } #else #define _mm_sha1rnds4_epu32(A, B, I) \ ((__m128i)__builtin_ia32_sha1rnds4((__v4si)(__m128i)A, (__v4si)(__m128i)B, \ (int)I)) #endif __funline __m128i _mm_sha256msg1_epu32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_sha256msg1((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_sha256msg2_epu32(__m128i __A, __m128i __B) { return (__m128i)__builtin_ia32_sha256msg2((__v4si)__A, (__v4si)__B); } __funline __m128i _mm_sha256rnds2_epu32(__m128i __A, __m128i __B, __m128i __C) { return (__m128i)__builtin_ia32_sha256rnds2((__v4si)__A, (__v4si)__B, (__v4si)__C); } #ifdef __DISABLE_SHA__ #undef __DISABLE_SHA__ #pragma GCC pop_options #endif /* __DISABLE_SHA__ */ #endif /* _SHAINTRIN_H_INCLUDED */
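/*
 * Hedged sketch, not part of the original header: the general shape of
 * one SHA-1 four-round step built from the intrinsics above. The
 * variable roles here are assumptions, and this is nowhere near a
 * complete SHA-1; it only shows how the message-schedule and round
 * primitives compose, and how sha1rnds4 takes an immediate.
 */
#if 0
__m128i demo_sha1_step(__m128i abcd, __m128i e_plus_w, __m128i w0,
                       __m128i w1) {
  /* derive the next message words and fold in the running E value */
  __m128i x = _mm_sha1nexte_epu32(e_plus_w, _mm_sha1msg1_epu32(w0, w1));
  /* four rounds with round function 0 (the one used for rounds 0..19) */
  return _mm_sha1rnds4_epu32(abcd, x, 0);
}
#endif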
1,726
55
jart/cosmopolitan
false
cosmopolitan/third_party/intel/xsavecintrin.internal.h
#if !defined _IMMINTRIN_H_INCLUDED #error "Never use <xsavecintrin.h> directly; include <immintrin.h> instead." #endif #ifndef _XSAVECINTRIN_H_INCLUDED #define _XSAVECINTRIN_H_INCLUDED #ifndef __XSAVEC__ #pragma GCC push_options #pragma GCC target("xsavec") #define __DISABLE_XSAVEC__ #endif /* __XSAVEC__ */ __funline void _xsavec(void *__P, long long __M) { __builtin_ia32_xsavec(__P, __M); } #ifdef __x86_64__ __funline void _xsavec64(void *__P, long long __M) { __builtin_ia32_xsavec64(__P, __M); } #endif #ifdef __DISABLE_XSAVEC__ #undef __DISABLE_XSAVEC__ #pragma GCC pop_options #endif /* __DISABLE_XSAVEC__ */ #endif /* _XSAVECINTRIN_H_INCLUDED */
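/*
 * Usage sketch, added and not part of the original header: _xsavec
 * requires a 64-byte aligned save area and a feature mask; bits 0 and 1
 * request x87 and SSE state. The 4096-byte size is an illustrative
 * guess, and real code should size the area from CPUID leaf 0xD.
 */
#if 0
static char demo_xsave_area[4096] __attribute__((__aligned__(64)));
static void demo_save_fpu_sse(void) {
  _xsavec(demo_xsave_area, 0x3); /* compacted save of x87 | SSE */
}
#endif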
666
30
jart/cosmopolitan
false
cosmopolitan/third_party/finger/util.c
/* * Copyright (c) 1989 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * Tony Nardo of the Johns Hopkins University/Applied Physics Lab. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "libc/calls/calls.h" #include "libc/calls/struct/stat.h" #include "libc/calls/struct/stat.macros.h" #include "libc/errno.h" #include "libc/fmt/fmt.h" #include "libc/mem/mem.h" #include "libc/paths.h" #include "libc/runtime/runtime.h" #include "libc/str/str.h" #include "libc/sysv/consts/o.h" #include "third_party/finger/finger.h" // clang-format off #ifndef lint /*static char sccsid[] = "from: @(#)util.c 5.14 (Berkeley) 1/17/91";*/ char util_rcsid[] = "$Id: util.c,v 1.18 1999/09/28 22:53:58 netbug Exp $"; #endif /* not lint */ #define HBITS 8 /* number of bits in hash code */ #define HSIZE (1 << 8) /* hash table size */ #define HMASK (HSIZE - 1) /* hash code mask */ static PERSON *htab[HSIZE]; /* the buckets */ static int hash(const char *name); static void find_idle_and_ttywrite(register WHERE *w) { struct stat sb; /* No device for X console. Utmp entry by XDM login (":0"). */ if (w->tty[0] == ':') { w->idletime = 0; /* would be nice to have it emit ??? */ w->writable = 0; return; } snprintf(tbuf, TBUFLEN, "%s/%s", _PATH_DEV, w->tty); if (stat(tbuf, &sb) < 0) { eprintf("finger: %s: %s\n", tbuf, strerror(errno)); return; } w->idletime = now < sb.st_atime ? 
0 : now - sb.st_atime; #define TALKABLE 0220 /* tty is writable if 220 mode */ w->writable = ((sb.st_mode & TALKABLE) == TALKABLE); } static void userinfo(PERSON *pn, struct passwd *pw) { char *p; struct stat sb; char *bp; char *rname; int i, j, ct; char *fields[4]; int nfields; pn->uid = pw->pw_uid; pn->name = strdup(pw->pw_name); pn->dir = strdup(pw->pw_dir); pn->shell = strdup(pw->pw_shell); /* make a private copy of gecos to munge */ strncpy(tbuf, pw->pw_gecos, TBUFLEN); tbuf[TBUFLEN-1] = 0; /* ensure null termination */ bp = tbuf; /* why do we skip asterisks!?!? */ if (*bp == '*') ++bp; /* * fields[0] -> real name * fields[1] -> office * fields[2] -> officephone * fields[3] -> homephone */ nfields = 0; for (p = strtok(bp, ","); p; p = strtok(NULL, ",")) { if (*p==0) p = NULL; // skip empties if (nfields < 4) fields[nfields++] = p; } while (nfields<4) fields[nfields++] = NULL; if (fields[0]) { /* * Ampersands in gecos get replaced by the capitalized login * name. This is a major nuisance and whoever thought it up * should be shot. */ p = fields[0]; /* First, count the number of ampersands. */ for (ct=i=0; p[i]; i++) if (p[i]=='&') ct++; /* This tells us how much space we need to copy the name. */ rname = malloc(strlen(p) + ct*strlen(pw->pw_name) + 1); if (!rname) { eprintf("finger: Out of space.\n"); exit(1); } /* Now, do it */ for (i=j=0; p[i]; i++) { if (p[i]=='&') { strcpy(rname + j, pw->pw_name); if (islower(rname[j])) { rname[j] = toupper(rname[j]); } j += strlen(pw->pw_name); } else { rname[j++] = p[i]; } } rname[j] = 0; pn->realname = rname; } pn->office = fields[1] ? strdup(fields[1]) : NULL; pn->officephone = fields[2] ? strdup(fields[2]) : NULL; pn->homephone = fields[3] ? strdup(fields[3]) : NULL; pn->mailrecv = -1; /* -1 == not_valid */ pn->mailread = -1; /* -1 == not_valid */ snprintf(tbuf, TBUFLEN, "%s/%s", _PATH_MAILDIR, pw->pw_name); if (stat(tbuf, &sb) < 0) { if (errno != ENOENT) { eprintf("finger: %s: %s\n", tbuf, strerror(errno)); return; } } else if (sb.st_size != 0) { pn->mailrecv = sb.st_mtime; pn->mailread = sb.st_atime; } } int match(struct passwd *pw, const char *user) { char *p; int i, j, ct, rv=0; char *rname; strncpy(tbuf, pw->pw_gecos, TBUFLEN); tbuf[TBUFLEN-1] = 0; /* guarantee null termination */ p = tbuf; /* why do we skip asterisks!?!? */ if (*p == '*') ++p; /* truncate the uninteresting stuff off the end of gecos */ p = strtok(p, ","); if (!p) return 0; /* * Ampersands get replaced by the login name. */ /* First, count the number of ampersands. */ for (ct=i=0; p[i]; i++) if (p[i]=='&') ct++; /* This tells us how much space we need to copy the name. */ rname = malloc(strlen(p) + ct*strlen(pw->pw_name) + 1); if (!rname) { eprintf("finger: Out of space.\n"); exit(1); } /* Now, do it */ for (i=j=0; p[i]; i++) { if (p[i]=='&') { strcpy(rname + j, pw->pw_name); if (islower(rname[j])) rname[j] = toupper(rname[j]); j += strlen(pw->pw_name); } else { rname[j++] = p[i]; } } rname[j] = 0; for (p = strtok(rname, "\t "); p && !rv; p = strtok(NULL, "\t ")) { if (!strcasecmp(p, user)) rv = 1; } free(rname); return rv; } static int get_lastlog(int fd, uid_t uid, struct lastlog *ll) { loff_t pos; if (fd == -1) return -1; pos = (long)uid * sizeof(*ll); if (lseek(fd, pos, SEEK_SET) != pos) return -1; if (read(fd, ll, sizeof(*ll)) != sizeof(*ll)) return -1; return 0; } void enter_lastlog(PERSON *pn) { static int opened = 0, fd = -1; WHERE *w; struct lastlog ll; int doit = 0; /* some systems may not maintain lastlog, don't report errors. 
*/
    if (!opened) {
        fd = open(_PATH_LASTLOG, O_RDONLY, 0);
        opened = 1;
    }

    if (get_lastlog(fd, pn->uid, &ll)) {
        /* as if never logged in */
        ll.ll_line[0] = ll.ll_host[0] = '\0';
        ll.ll_time = 0;
    }

    if ((w = pn->whead) == NULL)
        doit = 1;
    else if (ll.ll_time != 0) {
        /* if last login is earlier than some current login */
        for (; !doit && w != NULL; w = w->next)
            if (w->info == LOGGEDIN && w->loginat < ll.ll_time)
                doit = 1;
        /*
         * and if it's not any of the current logins
         * can't use time comparison because there may be a small
         * discrepancy since login calls time() twice
         */
        for (w = pn->whead; doit && w != NULL; w = w->next)
            if (w->info == LOGGEDIN &&
                strncmp(w->tty, ll.ll_line, UT_LINESIZE) == 0)
                doit = 0;
    }
    if (doit) {
        w = walloc(pn);
        w->info = LASTLOG;
        bcopy(ll.ll_line, w->tty, UT_LINESIZE);
        w->tty[UT_LINESIZE] = 0;
        bcopy(ll.ll_host, w->host, UT_HOSTSIZE);
        w->host[UT_HOSTSIZE] = 0;
        w->loginat = ll.ll_time;
    }
}

void enter_where(struct utmp *ut, PERSON *pn) {
    register WHERE *w = walloc(pn);

    w->info = LOGGEDIN;
    bcopy(ut->ut_line, w->tty, UT_LINESIZE);
    w->tty[UT_LINESIZE] = 0;
    bcopy(ut->ut_host, w->host, UT_HOSTSIZE);
    w->host[UT_HOSTSIZE] = 0;
    w->loginat = ut->ut_time;
    find_idle_and_ttywrite(w);
}

PERSON *
enter_person(struct passwd *pw) {
    register PERSON *pn, **pp;

    for (pp = htab + hash(pw->pw_name);
         *pp != NULL && strcmp((*pp)->name, pw->pw_name) != 0;
         pp = &(*pp)->hlink)
        ;
    if ((pn = *pp) == NULL) {
        pn = palloc();
        entries++;
        if (phead == NULL)
            phead = ptail = pn;
        else {
            ptail->next = pn;
            ptail = pn;
        }
        pn->next = NULL;
        pn->hlink = NULL;
        *pp = pn;
        userinfo(pn, pw);
        pn->whead = NULL;
    }
    return(pn);
}

PERSON *find_person(const char *name) {
    register PERSON *pn;

    /* name may be only UT_NAMESIZE long and not terminated */
    for (pn = htab[hash(name)];
         pn != NULL && strncmp(pn->name, name, UT_NAMESIZE) != 0;
         pn = pn->hlink)
        ;
    return(pn);
}

static int hash(const char *name) {
    register int h, i;

    h = 0;
    /* name may be only UT_NAMESIZE long and not terminated */
    for (i = UT_NAMESIZE; --i >= 0 && *name;)
        h = ((h << 2 | h >> (HBITS - 2)) ^ *name++) & HMASK;
    return(h);
}

PERSON *palloc(void) {
    PERSON *p;

    if ((p = (PERSON *)malloc((unsigned) sizeof(PERSON))) == NULL) {
        eprintf("finger: Out of space.\n");
        exit(1);
    }
    return(p);
}

WHERE *
walloc(PERSON *pn) {
    register WHERE *w;

    if ((w = (WHERE *)malloc((unsigned) sizeof(WHERE))) == NULL) {
        eprintf("finger: Out of space.\n");
        exit(1);
    }
    if (pn->whead == NULL)
        pn->whead = pn->wtail = w;
    else {
        pn->wtail->next = w;
        pn->wtail = w;
    }
    w->next = NULL;
    return(w);
}

const char *
prphone(const char *num) {
    char *p;
    const char *q;
    int len;
    static char pbuf[15];

    /* don't touch anything if the user has their own formatting */
    for (q = num; *q; ++q)
        if (!isdigit(*q))
            return(num);
    len = q - num;
    p = pbuf;
    switch(len) {
    case 11:            /* +0-123-456-7890 */
        *p++ = '+';
        *p++ = *num++;
        *p++ = '-';
        /* FALLTHROUGH */
    case 10:            /* 012-345-6789 */
        *p++ = *num++;
        *p++ = *num++;
        *p++ = *num++;
        *p++ = '-';
        /* FALLTHROUGH */
    case 7:             /* 012-3456 */
        *p++ = *num++;
        *p++ = *num++;
        *p++ = *num++;
        break;
    case 5:             /* x0-1234 */
    case 4:             /* x1234 */
        *p++ = 'x';
        *p++ = *num++;
        break;
    default:
        return num;
    }
    if (len != 4) {
        *p++ = '-';
        *p++ = *num++;
    }
    *p++ = *num++;
    *p++ = *num++;
    *p++ = *num++;
    *p = '\0';
    return(pbuf);
}
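/*
 * Added commentary: worked examples for prphone() above, derived from
 * its case labels. "01234567890" (11 digits) formats as "+0-123-456-7890",
 * "0123456789" as "012-345-6789", "0123456" as "012-3456", "01234" as
 * "x0-1234", "0123" as "x0123", and any input containing a non-digit is
 * returned unchanged.
 */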
10,513
417
jart/cosmopolitan
false
cosmopolitan/third_party/finger/lprint.c
/* * Copyright (c) 1989 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * Tony Nardo of the Johns Hopkins University/Applied Physics Lab. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "libc/calls/calls.h" #include "libc/calls/struct/stat.h" #include "libc/fmt/fmt.h" #include "libc/paths.h" #include "libc/stdio/stdio.h" #include "libc/str/str.h" #include "libc/sysv/consts/o.h" #include "libc/sysv/consts/s.h" #include "libc/time/struct/tm.h" #include "libc/time/time.h" #include "third_party/finger/finger.h" // clang-format off /* * from: @(#)lprint.c 5.13 (Berkeley) 10/31/90 */ char lprint_rcsid[] = "$Id: lprint.c,v 1.11 1999/09/14 10:51:11 dholland Exp $"; static void lprint(PERSON *pn); static int demi_print(char *str, int oddfield); static int show_text(const char *directory, const char *file_name, const char *header); #define LINE_LEN 80 #define TAB_LEN 8 /* 8 spaces between tabs */ #define _PATH_FORWARD ".forward" #define _PATH_PLAN ".plan" #define _PATH_PROJECT ".project" #define _PATH_PGPKEY ".pgpkey" void lflag_print(void) { register PERSON *pn = phead; while (1) { lprint(pn); if (!pplan) { show_text(pn->dir, _PATH_PGPKEY, "PGP key:\n"); show_text(pn->dir, _PATH_PROJECT, "Project:\n"); if (!show_text(pn->dir, _PATH_PLAN, "Plan:\n")) { xprintf("No Plan.\n"); } } if (!(pn = pn->next)) break; xputc('\n'); } } static void lprint(PERSON *pn) { struct tm *delta, *tp; WHERE *w; int cpr, len, maxlen; int oddfield; char timebuf[128]; /* * long format -- * login name * real name * home directory * shell * office, office phone, home phone if available */ xprintf("Login: %-15s\t\t\tName: %s\nDirectory: %-25s", pn->name, pn->realname, pn->dir); xprintf("\tShell: %-s\n", *pn->shell ? 
pn->shell : _PATH_BSHELL); /* * try and print office, office phone, and home phone on one line; * if that fails, do line filling so it looks nice. */ #define OFFICE_TAG "Office" #define OFFICE_PHONE_TAG "Office Phone" oddfield = 0; if (pn->office && pn->officephone && strlen(pn->office) + strlen(pn->officephone) + sizeof(OFFICE_TAG) + 2 <= 5 * TAB_LEN) { snprintf(tbuf, TBUFLEN, "%s: %s, %s", OFFICE_TAG, pn->office, prphone(pn->officephone)); oddfield = demi_print(tbuf, oddfield); } else { if (pn->office) { snprintf(tbuf, TBUFLEN, "%s: %s", OFFICE_TAG, pn->office); oddfield = demi_print(tbuf, oddfield); } if (pn->officephone) { snprintf(tbuf, TBUFLEN, "%s: %s", OFFICE_PHONE_TAG, prphone(pn->officephone)); oddfield = demi_print(tbuf, oddfield); } } if (pn->homephone) { snprintf(tbuf, TBUFLEN, "%s: %s", "Home Phone", prphone(pn->homephone)); oddfield = demi_print(tbuf, oddfield); } if (oddfield) xputc('\n'); /* * long format con't: * if logged in * terminal * idle time * if messages allowed * where logged in from * if not logged in * when last logged in */ /* find out longest device name for this user for formatting */ for (w = pn->whead, maxlen = -1; w != NULL; w = w->next) if ((len = strlen(w->tty)) > maxlen) maxlen = len; /* find rest of entries for user */ for (w = pn->whead; w != NULL; w = w->next) { switch (w->info) { case LOGGEDIN: tp = localtime(&w->loginat); /* * t = asctime(tp); * tzset(); * tzn = tzname[daylight]; * cpr = printf("On since %.16s (%s) on %s", * t, tzn, w->tty); */ strftime(timebuf, sizeof(timebuf), "%a %b %e %R (%Z)", tp); cpr = xprintf("On since %s on %s", timebuf, w->tty); if (*w->host) { cpr += xprintf(" from %s", w->host); } /* * idle time is tough; if have one, print a comma, * then spaces to pad out the device name, then the * idle time. Follow with a comma if a remote login. */ delta = gmtime(&w->idletime); if (delta->tm_yday || delta->tm_hour || delta->tm_min || delta->tm_sec) { if (*w->host) xputc('\n'); cpr += xprintf("%-*s", (int) (maxlen - strlen(w->tty) + 3), ""); if (delta->tm_yday > 0) { cpr += xprintf("%d day%s ", delta->tm_yday, delta->tm_yday == 1 ? "" : "s"); } if (delta->tm_hour > 0) { cpr += xprintf("%d hour%s ", delta->tm_hour, delta->tm_hour == 1 ? "" : "s"); } if ((delta->tm_min > 0) && !delta->tm_yday) { cpr += xprintf("%d minute%s ", delta->tm_min, delta->tm_min == 1 ? "" : "s"); } if ((delta->tm_sec > 0) && !delta->tm_yday && !delta->tm_hour) { cpr += xprintf("%d second%s ", delta->tm_sec, delta->tm_sec == 1 ? 
"" : "s"); } cpr += xprintf("idle"); } if (!w->writable) { if (delta->tm_yday || delta->tm_hour || delta->tm_min || delta->tm_sec) cpr += xprintf("\n "); cpr += xprintf(" (messages off)"); } break; case LASTLOG: if (w->loginat == 0) { (void)xprintf("Never logged in."); break; } tp = localtime(&w->loginat); /* * t = asctime(tp); * tzset(); * tzn = tzname[daylight]; * if(now - w->loginat > SECSPERDAY * DAYSPERNYEAR / 2) * cpr = * printf("Last login %.16s %.4s (%s) on %s", * t, t + 20, tzn, w->tty); * else * cpr = printf("Last login %.16s (%s) on %s", * t, tzn, w->tty); */ if (now - w->loginat < SECSPERDAY * DAYSPERNYEAR / 2) { strftime(timebuf, sizeof(timebuf), "%a %b %e %R (%Z)", tp); } else { strftime(timebuf, sizeof(timebuf), "%a %b %e %R %Y (%Z)", tp); } cpr = xprintf("Last login %s on %s", timebuf, w->tty); if (*w->host) { cpr += xprintf(" from %s", w->host); } break; } xputc('\n'); } /* If the user forwards mail elsewhere, tell us about it */ show_text(pn->dir, _PATH_FORWARD, "Mail forwarded to "); /* Print the standard mailbox information. */ if (pn->mailrecv == -1) xprintf("No mail.\n"); else if (pn->mailrecv > pn->mailread) { tp = localtime(&pn->mailrecv); /* * t = asctime(tp); * tzset(); * tzn = tzname[daylight]; * printf("New mail received %.16s %.4s (%s)\n", t, * t + 20, tzn); */ strftime(timebuf, sizeof(timebuf), "%a %b %e %R %Y (%Z)", tp); xprintf("New mail received %s\n", timebuf); tp = localtime(&pn->mailread); /* * t = asctime(tp); * tzset(); * tzn = tzname[daylight]; * printf(" Unread since %.16s %.4s (%s)\n", t, * t + 20, tzn); */ strftime(timebuf, sizeof(timebuf), "%a %b %e %R %Y (%Z)", tp); xprintf(" Unread since %s\n", timebuf); } else { tp = localtime(&pn->mailread); /* * t = asctime(tp); * tzset(); * tzn = tzname[daylight]; * printf("Mail last read %.16s %.4s (%s)\n", t, * t + 20, tzn); */ strftime(timebuf, sizeof(timebuf), "%a %b %e %R %Y (%Z)", tp); xprintf("Mail last read %s\n", timebuf); } } static int demi_print(char *str, int oddfield) { static int lenlast; int lenthis, maxlen; lenthis = strlen(str); if (oddfield) { /* * We left off on an odd number of fields. If we haven't * crossed the midpoint of the screen, and we have room for * the next field, print it on the same line; otherwise, * print it on a new line. * * Note: we insist on having the right hand fields start * no less than 5 tabs out. */ maxlen = 5 * TAB_LEN; if (maxlen < lenlast) maxlen = lenlast; if (((((maxlen / TAB_LEN) + 1) * TAB_LEN) + lenthis) <= LINE_LEN) { while(lenlast < (4 * TAB_LEN)) { xputc('\t'); lenlast += TAB_LEN; } (void)xprintf("\t%s\n", str); /* force one tab */ } else { (void)xprintf("\n%s", str); /* go to next line */ oddfield = !oddfield; /* this'll be undone below */ } } else (void)xprintf("%s", str); oddfield = !oddfield; /* toggle odd/even marker */ lenlast = lenthis; return(oddfield); } static int show_text(const char *directory, const char *file_name, const char *header) { int ch, lastc = 0, fd; FILE *fp; struct stat sbuf1, sbuf2; snprintf(tbuf, TBUFLEN, "%s/%s", directory, file_name); if (lstat(tbuf, &sbuf1) || !S_ISREG(sbuf1.st_mode)) return 0; fd = open(tbuf, O_RDONLY); if (fd<0) return 0; if (fstat(fd, &sbuf2)) { close(fd); return 0; } /* if we didn't get the same file both times, bail */ if (sbuf1.st_dev!=sbuf2.st_dev || sbuf1.st_ino!=sbuf2.st_ino) { close(fd); return 0; } fp = fdopen(fd, "r"); if (fp == NULL) { close(fd); return 0; } xprintf("%s", header); while ((ch = getc(fp)) != EOF) { xputc(ch); lastc = ch; } if (lastc != '\n') xputc('\n'); fclose(fp); return 1; }
10,413
363
jart/cosmopolitan
false
cosmopolitan/third_party/finger/net.c
/* * Copyright (c) 1989 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * Tony Nardo of the Johns Hopkins University/Applied Physics Lab. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "libc/calls/calls.h" #include "libc/dns/ent.h" #include "libc/errno.h" #include "libc/sock/sock.h" #include "libc/stdio/stdio.h" #include "libc/str/str.h" #include "libc/sysv/consts/af.h" #include "libc/sysv/consts/sock.h" #include "third_party/finger/finger.h" // clang-format off #ifndef lint /*static char sccsid[] = "from: @(#)net.c 5.5 (Berkeley) 6/1/90";*/ char net_rcsid[] = "$Id: net.c,v 1.9 1999/09/14 10:51:11 dholland Exp $"; #endif /* not lint */ void netfinger(const char *name) { register FILE *fp; struct in_addr defaddr; register int c, sawret, ateol; struct hostent *hp, def; struct servent *sp; struct sockaddr_in sn; int s; char *alist[1], *host; host = strrchr(name, '@'); if (!host) return; *host++ = '\0'; memset(&sn, 0, sizeof(sn)); sp = getservbyname("finger", "tcp"); if (!sp) { eprintf("finger: tcp/finger: unknown service\n"); return; } sn.sin_port = sp->s_port; hp = gethostbyname(host); if (!hp) { if (!inet_aton(host, &defaddr)) { eprintf("finger: unknown host: %s\n", host); return; } def.h_name = host; def.h_addr_list = alist; def.h_addr = (char *)&defaddr; def.h_length = sizeof(struct in_addr); def.h_addrtype = AF_INET; def.h_aliases = 0; hp = &def; } sn.sin_family = hp->h_addrtype; if (hp->h_length > (int)sizeof(sn.sin_addr)) { hp->h_length = sizeof(sn.sin_addr); } memcpy(&sn.sin_addr, hp->h_addr, hp->h_length); if ((s = socket(hp->h_addrtype, SOCK_STREAM, 0)) < 0) { eprintf("finger: socket: %s\n", strerror(errno)); return; } /* print hostname before connecting, in case it takes a while */ xprintf("[%s]\n", hp->h_name); if (connect(s, (struct sockaddr *)&sn, sizeof(sn)) < 0) { eprintf("finger: connect: %s\n", strerror(errno)); close(s); return; } /* -l flag for remote fingerd */ if (lflag) write(s, "/W ", 3); /* send the name followed by <CR><LF> */ write(s, name, strlen(name)); write(s, "\r\n", 2); /* * Read from the remote system; once we're connected, we assume some * data. If none arrives, we hang until the user interrupts. * * If we see a <CR> or a <CR> with the high bit set, treat it as * a newline; if followed by a newline character, only output one * newline. * * Text is sent to xputc() for printability analysis. */ fp = fdopen(s, "r"); if (!fp) { eprintf("finger: fdopen: %s\n", strerror(errno)); close(s); return; } sawret = 0; ateol = 1; while ((c = getc(fp)) != EOF) { c &= 0xff; if (c == ('\r'|0x80) || c == ('\n'|0x80)) c &= 0x7f; if (c == '\r') { sawret = ateol = 1; xputc('\n'); } else if (sawret && c == '\n') { sawret = 0; /* don't print */ } else { if (c == '\n') ateol = 1; sawret = 0; xputc(c); } } if (!ateol) xputc('\n'); fclose(fp); }
4,746
154
jart/cosmopolitan
false
cosmopolitan/third_party/finger/finger.mk
#-*-mode:makefile-gmake;indent-tabs-mode:t;tab-width:8;coding:utf-8-*-┐ #───vi: set et ft=make ts=8 tw=8 fenc=utf-8 :vi───────────────────────┘ PKGS += THIRD_PARTY_FINGER THIRD_PARTY_FINGER_ARTIFACTS += THIRD_PARTY_FINGER_A THIRD_PARTY_FINGER = $(THIRD_PARTY_FINGER_A_DEPS) $(THIRD_PARTY_FINGER_A) THIRD_PARTY_FINGER_A = o/$(MODE)/third_party/finger/finger.a THIRD_PARTY_FINGER_A_FILES := $(wildcard third_party/finger/*) THIRD_PARTY_FINGER_A_HDRS = $(filter %.h,$(THIRD_PARTY_FINGER_A_FILES)) THIRD_PARTY_FINGER_A_INCS = $(filter %.inc,$(THIRD_PARTY_FINGER_A_FILES)) THIRD_PARTY_FINGER_A_SRCS = $(filter %.c,$(THIRD_PARTY_FINGER_A_FILES)) THIRD_PARTY_FINGER_A_OBJS = $(THIRD_PARTY_FINGER_A_SRCS:%.c=o/$(MODE)/%.o) THIRD_PARTY_FINGER_A_DIRECTDEPS = \ LIBC_CALLS \ LIBC_FMT \ LIBC_INTRIN \ LIBC_MEM \ LIBC_NEXGEN32E \ LIBC_RUNTIME \ LIBC_STDIO \ LIBC_STR \ LIBC_STUBS \ LIBC_SYSV \ LIBC_DNS \ LIBC_SOCK \ LIBC_TIME \ THIRD_PARTY_MUSL \ THIRD_PARTY_GETOPT THIRD_PARTY_FINGER_A_DEPS := \ $(call uniq,$(foreach x,$(THIRD_PARTY_FINGER_A_DIRECTDEPS),$($(x)))) THIRD_PARTY_FINGER_A_CHECKS = \ $(THIRD_PARTY_FINGER_A).pkg $(THIRD_PARTY_FINGER_A): \ third_party/finger/ \ $(THIRD_PARTY_FINGER_A).pkg \ $(THIRD_PARTY_FINGER_A_OBJS) $(THIRD_PARTY_FINGER_A).pkg: \ $(THIRD_PARTY_FINGER_A_OBJS) \ $(foreach x,$(THIRD_PARTY_FINGER_A_DIRECTDEPS),$($(x)_A).pkg) o/$(MODE)/third_party/finger/finger.com.dbg: \ $(THIRD_PARTY_FINGER) \ o/$(MODE)/third_party/finger/finger.o \ $(CRT) \ $(APE_NO_MODIFY_SELF) @$(APELINK) THIRD_PARTY_FINGER_COMS = o/$(MODE)/third_party/finger/finger.com THIRD_PARTY_FINGER_BINS = $(THIRD_PARTY_FINGER_COMS) $(THIRD_PARTY_FINGER_COMS:%=%.dbg) THIRD_PARTY_FINGER_LIBS = $(foreach x,$(THIRD_PARTY_FINGER_ARTIFACTS),$($(x))) THIRD_PARTY_FINGER_SRCS = $(foreach x,$(THIRD_PARTY_FINGER_ARTIFACTS),$($(x)_SRCS)) THIRD_PARTY_FINGER_HDRS = $(foreach x,$(THIRD_PARTY_FINGER_ARTIFACTS),$($(x)_HDRS)) THIRD_PARTY_FINGER_INCS = $(foreach x,$(THIRD_PARTY_FINGER_ARTIFACTS),$($(x)_INCS)) THIRD_PARTY_FINGER_CHECKS = $(foreach x,$(THIRD_PARTY_FINGER_ARTIFACTS),$($(x)_CHECKS)) THIRD_PARTY_FINGER_OBJS = $(foreach x,$(THIRD_PARTY_FINGER_ARTIFACTS),$($(x)_OBJS)) $(THIRD_PARTY_FINGER_OBJS): third_party/finger/finger.mk .PHONY: o/$(MODE)/third_party/finger o/$(MODE)/third_party/finger: \ $(THIRD_PARTY_FINGER_BINS) \ $(THIRD_PARTY_FINGER_CHECKS)
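# Added note (not in the original makefile): with MODE left empty, this
# package and its finger.com binary are expected to build from the repo
# root with something like
#
#     make -j8 o//third_party/finger
#
# which resolves to the o/$(MODE)/third_party/finger phony target above.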
2,531
68
jart/cosmopolitan
false
cosmopolitan/third_party/finger/finger.1
.\" Copyright (c) 1989, 1990 The Regents of the University of California. .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 3. All advertising materials mentioning features or use of this software .\" must display the following acknowledgement: .\" This product includes software developed by the University of .\" California, Berkeley and its contributors. .\" 4. Neither the name of the University nor the names of its contributors .\" may be used to endorse or promote products derived from this software .\" without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" from: @(#)finger.1 6.14 (Berkeley) 7/27/91 .\" $Id: finger.1,v 1.18 2000/07/30 23:56:57 dholland Exp $ .\" .Dd August 15, 1999 .Dt FINGER 1 .Os "Linux NetKit (0.17)" .Sh NAME .Nm finger .Nd user information lookup program .Sh SYNOPSIS .Nm finger .Op Fl lmsp .Op Ar user ... .Op Ar user@host ... .Sh DESCRIPTION The .Nm finger displays information about the system users. .Pp Options are: .Bl -tag -width flag .It Fl s .Nm Finger displays the user's login name, real name, terminal name and write status (as a ``*'' after the terminal name if write permission is denied), idle time, login time, office location and office phone number. .Pp Login time is displayed as month, day, hours and minutes, unless more than six months ago, in which case the year is displayed rather than the hours and minutes. .Pp Unknown devices as well as nonexistent idle and login times are displayed as single asterisks. .Pp .It Fl l Produces a multi-line format displaying all of the information described for the .Fl s option as well as the user's home directory, home phone number, login shell, mail status, and the contents of the files .Dq Pa .plan , .Dq Pa .project , .Dq Pa .pgpkey and .Dq Pa .forward from the user's home directory. .Pp Phone numbers specified as eleven digits are printed as ``+N-NNN-NNN-NNNN''. Numbers specified as ten or seven digits are printed as the appropriate subset of that string. Numbers specified as five digits are printed as ``xN-NNNN''. Numbers specified as four digits are printed as ``xNNNN''. .Pp If write permission is denied to the device, the phrase ``(messages off)'' is appended to the line containing the device name. 
One entry per user is displayed with the
.Fl l
option; if a user is logged on multiple times, terminal information
is repeated once per login.
.Pp
Mail status is shown as ``No Mail.'' if there is no mail at all,
``Mail last read DDD MMM ## HH:MM YYYY (TZ)'' if the person has
looked at their mailbox since new mail arrived, or
``New mail received ...'', `` Unread since ...'' if they have new mail.
.Pp
.It Fl p
Prevents the
.Fl l
option of
.Nm finger
from displaying the contents of the
.Dq Pa .plan ,
.Dq Pa .project
and
.Dq Pa .pgpkey
files.
.It Fl m
Prevent matching of
.Ar user
names.
.Ar User
is usually a login name; however, matching will also be done on the
users' real names, unless the
.Fl m
option is supplied.
All name matching performed by
.Nm finger
is case insensitive.
.El
.Pp
If no options are specified,
.Nm finger
defaults to the
.Fl l
style output if operands are provided, otherwise to the
.Fl s
style.
Note that some fields may be missing, in either format, if information
is not available for them.
.Pp
If no arguments are specified,
.Nm finger
will print an entry for each user currently logged into the system.
.Pp
.Nm Finger
may be used to look up users on a remote machine.
The format is to specify a
.Ar user
as
.Dq Li user@host ,
or
.Dq Li @host ,
where the default output format for the former is the
.Fl l
style, and the default output format for the latter is the
.Fl s
style.
The
.Fl l
option is the only option that may be passed to a remote machine.
.Pp
If standard output is a socket,
.Nm finger
will emit a carriage return (^M) before every linefeed (^J).
This is for processing remote finger requests when invoked by
.Xr fingerd 8 .
.Sh FILES
.Bl -tag -width mmmmmmmmmmmmmmm
.It Pa ~/.nofinger
If finger finds this file in a user's home directory, it will, for
finger requests originating outside the local host, firmly deny the
existence of that user.
For this to work, the finger program, as started by
.Xr fingerd 8 ,
must be able to see the
.Pa .nofinger
file.
This generally means that the home directory containing the file must
have the other-users-execute bit set (o+x).
See
.Xr chmod 1 .
If you use this feature for privacy, please test it with
``finger @localhost'' before relying on it, just in case.
.It ~/.plan
.It ~/.project
.It ~/.pgpkey
These files are printed as part of a long-format request.
The
.Pa .project
file is limited to one line; the
.Pa .plan
file may be arbitrarily long.
.El
.Sh SEE ALSO
.Xr chfn 1 ,
.Xr passwd 1 ,
.Xr w 1 ,
.Xr who 1
.Sh HISTORY
The
.Nm finger
command appeared in
.Bx 3.0
6,071
195
jart/cosmopolitan
false
cosmopolitan/third_party/finger/finger.c
/* * Copyright (c) 1989 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * Tony Nardo of the Johns Hopkins University/Applied Physics Lab. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "libc/calls/calls.h" #include "libc/calls/struct/stat.h" #include "libc/calls/weirdtypes.h" #include "libc/fmt/fmt.h" #include "libc/mem/mem.h" #include "libc/nt/struct/msg.h" #include "libc/runtime/runtime.h" #include "libc/runtime/utmp.h" #include "libc/sock/sock.h" #include "libc/sock/struct/sockaddr.h" #include "libc/str/str.h" #include "libc/sysv/consts/fileno.h" #include "libc/time/time.h" #include "third_party/finger/finger.h" #include "third_party/getopt/getopt.h" #include "third_party/musl/passwd.h" // clang-format off /* * Mail status reporting added 931007 by Luke Mewburn, <[email protected]>. */ char copyright[] = "@(#) Copyright (c) 1989 The Regents of the University of California.\n" "All rights reserved.\n"; /* * from: @(#)finger.c 5.22 (Berkeley) 6/29/90 */ char finger_rcsid[] = \ "$Id: finger.c,v 1.15 1999/12/18 16:41:51 dholland Exp $"; /* * Finger prints out information about users. It is not portable since * certain fields (e.g. the full user name, office, and phone numbers) are * extracted from the gecos field of the passwd file which other UNIXes * may not have or may use for other things. (This is not really true any * more, btw.) * * There are currently two output formats; the short format is one line * per user and displays login name, tty, login time, real name, idle time, * and office location/phone number. The long format gives the same * information (in a more legible format) as well as home directory, shell, * mail info, and .plan/.project files. 
*/ static void loginlist(void); static void userlist(int argc, char *argv[]); int lflag, pplan; static int sflag, mflag; static int enable_nofinger; time_t now; char tbuf[TBUFLEN]; PERSON *phead, *ptail; /* the linked list of all people */ int entries; /* number of people */ int main(int argc, char *argv[]) { int ch; struct sockaddr_in sin; socklen_t slen = sizeof(sin); while ((ch = getopt(argc, argv, "lmps")) != EOF) { switch(ch) { case 'l': lflag = 1; /* long format */ break; case 'm': mflag = 1; /* do exact match of names */ break; case 'p': pplan = 1; /* don't show .plan/.project */ break; case 's': sflag = 1; /* short format */ break; case '?': case 'h': default: eprintf("usage: finger [-lmps] [login ...]\n"); return 1; } } argc -= optind; argv += optind; if (getsockname(STDOUT_FILENO, (struct sockaddr *)&sin, &slen)==0) { /* * stdout is a socket. must be a network finger request, * so emit CRs with our LFs at the ends of lines. */ set_crmode(); /* * Also, enable .nofinger processing. */ enable_nofinger = 1; } /* * Also check stdin for nofinger processing, because of older * fingerds that make stdout a pipe for CRLF handling. */ if (getsockname(STDIN_FILENO, (struct sockaddr *)&sin, &slen)==0) { enable_nofinger = 1; } time(&now); setpwent(); if (!*argv) { /* * Assign explicit "small" format if no names given and -l * not selected. Force the -s BEFORE we get names so proper * screening will be done. */ if (!lflag) { sflag = 1; /* if -l not explicit, force -s */ } loginlist(); if (entries == 0) { xprintf("No one logged on.\n"); } } else { userlist(argc, argv); /* * Assign explicit "large" format if names given and -s not * explicitly stated. Force the -l AFTER we get names so any * remote finger attempts specified won't be mishandled. */ if (!sflag) lflag = 1; /* if -s not explicit, force -l */ } if (entries != 0) { if (lflag) lflag_print(); else sflag_print(); } return 0; } /* Returns 1 if .nofinger is found and enable_nofinger is set. */ static int check_nofinger(struct passwd *pw) { if (enable_nofinger) { char path[PATH_MAX]; struct stat tripe; snprintf(path, sizeof(path), "%s/.nofinger", pw->pw_dir); if (stat(path, &tripe)==0) { return 1; } } return 0; } static void loginlist(void) { PERSON *pn; struct passwd *pw; struct utmp *uptr; char name[UT_NAMESIZE + 1]; name[UT_NAMESIZE] = '\0'; /* * if (!freopen(_PATH_UTMP, "r", stdin)) { * fprintf(stderr, "finger: can't read %s.\n", _PATH_UTMP); * exit(2); * } */ setutent(); while ((uptr = getutent())!=NULL) { if (!uptr->ut_name[0]) continue; #ifdef USER_PROCESS if (uptr->ut_type != USER_PROCESS) continue; #endif if ((pn = find_person(uptr->ut_name)) == NULL) { memcpy(name, uptr->ut_name, UT_NAMESIZE); if ((pw = getpwnam(name)) == NULL) continue; if (check_nofinger(pw)) continue; pn = enter_person(pw); } enter_where(uptr, pn); } for (pn = phead; lflag && pn != NULL; pn = pn->next) enter_lastlog(pn); endutent(); } static void do_local(int argc, char *argv[], int *used) { int i; struct passwd *pw; /* * traverse the list of possible login names and check the login name * and real name against the name specified by the user. 
*/ if (mflag) { for (i = 0; i < argc; i++) if (used[i] >= 0 && (pw = getpwnam(argv[i]))) { if (!check_nofinger(pw)) { enter_person(pw); used[i] = 1; } } } else for (pw = getpwent(); pw; pw = getpwent()) for (i = 0; i < argc; i++) if (used[i] >= 0 && (!strcasecmp(pw->pw_name, argv[i]) || match(pw, argv[i]))) { if (!check_nofinger(pw)) { enter_person(pw); used[i] = 1; } } /* list errors */ for (i = 0; i < argc; i++) if (!used[i]) (void)eprintf("finger: %s: no such user.\n", argv[i]); } static void userlist(int argc, char *argv[]) { int i; PERSON *pn; PERSON *nethead, **nettail; struct utmp *uptr; int dolocal, *used; used = calloc(argc, sizeof(int)); if (!used) { eprintf("finger: out of space.\n"); exit(1); } /* pull out all network requests */ for (i = 0, dolocal = 0, nettail = &nethead; i < argc; i++) { if (!strchr(argv[i], '@')) { dolocal = 1; continue; } pn = palloc(); *nettail = pn; nettail = &pn->next; pn->name = argv[i]; used[i] = -1; } *nettail = NULL; if (dolocal) do_local(argc, argv, used); /* handle network requests */ for (pn = nethead; pn; pn = pn->next) { netfinger(pn->name); if (pn->next || entries) xputc('\n'); } if (entries == 0) return; /* * Scan thru the list of users currently logged in, saving * appropriate data whenever a match occurs. */ /* * if (!freopen(_PATH_UTMP, "r", stdin)) { * (void)fprintf( stderr, "finger: can't read %s.\n", _PATH_UTMP); * exit(1); * } */ setutent(); while ((uptr = getutent())!=NULL) { if (!uptr->ut_name[0]) continue; #ifdef USER_PROCESS if (uptr->ut_type != USER_PROCESS) continue; #endif if ((pn = find_person(uptr->ut_name)) == NULL) { continue; } enter_where(uptr, pn); } for (pn = phead; pn != NULL; pn = pn->next) { enter_lastlog(pn); } endutent(); }
8,812
338
jart/cosmopolitan
false
cosmopolitan/third_party/finger/sprint.c
/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Tony Nardo of the Johns Hopkins University/Applied Physics Lab.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "libc/mem/alg.h"
#include "libc/mem/mem.h"
#include "libc/runtime/runtime.h"
#include "libc/str/str.h"
#include "libc/time/struct/tm.h"
#include "libc/time/time.h"
#include "third_party/finger/finger.h"
// clang-format off

#ifndef lint
/*static char sccsid[] = "from: @(#)sprint.c	5.8 (Berkeley) 12/4/90";*/
char sprint_rcsid[] = "$Id: sprint.c,v 1.10 1999/12/12 18:59:33 dholland Exp $";
#endif /* not lint */

static void stimeprint(WHERE *w);
static int psort(const void *a, const void *b);
static PERSON **sort(void);

void sflag_print(void) {
    register PERSON *pn;
    register WHERE *w;
    register char *p;
    PERSON **list;
    int maxlname, maxrname, space, cnt;

    list = sort();
    /*
     * short format --
     *	login name
     *	real name
     *	terminal name
     *	if terminal writeable (add an '*' to the terminal name
     *		if not)
     *	if logged in show idle time and day logged in, else
     *		show last login date and time. If > 6 months,
     *		show year instead of time.
     *	office location
     *	office phone
     */
    maxlname = maxrname = sizeof("Login ");
    for (cnt = 0; cnt < entries; ++cnt) {
        int l;
        pn = list[cnt];
        l = pn->name ? strlen(pn->name) : 1;
        if (l > maxlname)
            maxlname = l;
        l = pn->realname ?
strlen(pn->realname) : 1; if (l > maxrname) maxrname = l; } /* prevent screen overflow */ space = getscreenwidth() - 50; if (maxlname + maxrname > space) maxrname = space - maxlname; /* add a space if there's room */ if (maxlname + maxrname < space-2) { maxlname++; maxrname++; } (void)xprintf("%-*s %-*s %s\n", maxlname, "Login", maxrname, "Name", " Tty Idle Login Time Office Office Phone"); for (cnt = 0; cnt < entries; ++cnt) { pn = list[cnt]; for (w = pn->whead; w != NULL; w = w->next) { (void)xprintf("%-*.*s %-*.*s ", maxlname, maxlname, pn->name, maxrname, maxrname, pn->realname ? pn->realname : ""); if (!w->loginat) { (void)xprintf(" * * No logins "); goto office; } (void)xputc(w->info == LOGGEDIN && !w->writable ? '*' : ' '); if (*w->tty) (void)xprintf("%-7.7s ", w->tty); else (void)xprintf(" "); if (w->info == LOGGEDIN) { stimeprint(w); (void)xprintf(" "); } else (void)xprintf(" * "); p = ctime(&w->loginat); (void)xprintf("%.6s", p + 4); if (now - w->loginat >= SECSPERDAY * DAYSPERNYEAR / 2) (void)xprintf(" %.4s", p + 20); else (void)xprintf(" %.5s", p + 11); office: if (w->host[0] != '\0') { xprintf(" (%s)", w->host); } else { if (pn->office) (void)xprintf(" %-10.10s", pn->office); else if (pn->officephone) (void)xprintf(" %-10.10s", " "); if (pn->officephone) (void)xprintf(" %-.14s", prphone(pn->officephone)); } xputc('\n'); } } } static PERSON **sort(void) { register PERSON *pn, **lp; PERSON **list; if (!(list = (PERSON **)malloc((unsigned)(entries * sizeof(PERSON *))))) { eprintf("finger: Out of space.\n"); exit(1); } for (lp = list, pn = phead; pn != NULL; pn = pn->next) *lp++ = pn; (void)qsort(list, entries, sizeof(PERSON *), psort); return(list); } static int psort(const void *a, const void *b) { const PERSON *const *p = (const PERSON *const *)a; const PERSON *const *t = (const PERSON *const *)b; return(strcmp((*p)->name, (*t)->name)); } static void stimeprint(WHERE *w) { register struct tm *delta; delta = gmtime(&w->idletime); if (!delta->tm_yday) if (!delta->tm_hour) if (!delta->tm_min) (void)xprintf(" "); else (void)xprintf("%5d", delta->tm_min); else (void)xprintf("%2d:%02d", delta->tm_hour, delta->tm_min); else (void)xprintf("%4dd", delta->tm_yday); }
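/*
 * Added commentary on stimeprint() above: the idle column prints five
 * blanks when the idle time is under a minute, "%5d" minutes under an
 * hour, "%2d:%02d" hours and minutes under a day, and "%4dd" whole days
 * otherwise -- e.g. 90 seconds shows as "    1" and 26 hours as "   1d".
 */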
5,638
174
jart/cosmopolitan
false
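The idle column in the short format above is produced by stimeprint(), whose nested ifs pick the narrowest nonzero unit out of a gmtime() decomposition. Below is a hedged, self-contained sketch (standard C only; demo_stimeprint and the sample values are this editor's additions, not part of the package) showing the same formatting on concrete inputs.

#include <stdio.h>
#include <time.h>

/* same logic as stimeprint() in sprint.c above, writing to stdout */
static void demo_stimeprint(time_t idletime) {
  struct tm *delta = gmtime(&idletime);  /* elapsed secs -> days/hours/mins */
  if (!delta->tm_yday)
    if (!delta->tm_hour)
      if (!delta->tm_min)
        printf("     ");                                 /* idle < 1 minute */
      else
        printf("%5d", delta->tm_min);                    /* whole minutes */
    else
      printf("%2d:%02d", delta->tm_hour, delta->tm_min); /* hours:minutes */
  else
    printf("%4dd", delta->tm_yday);                      /* whole days */
}

int main(void) {
  time_t samples[] = {30, 300, 7500, 200000}; /* 30s, 5m, 2h5m, ~2.3d */
  for (int i = 0; i < 4; ++i) {
    demo_stimeprint(samples[i]);
    printf("\n"); /* prints "     ", "    5", " 2:05", "   2d" */
  }
  return 0;
}

gmtime() works as a unit decomposer here because idle time is an elapsed-seconds count, so the epoch-relative tm_yday/tm_hour/tm_min fields read directly as whole days, hours, and minutes.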
cosmopolitan/third_party/finger/finger.h
// clang-format off /* * Copyright (c) 1989 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * Tony Nardo of the Johns Hopkins University/Applied Physics Lab. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)finger.h 5.5 (Berkeley) 6/1/90 * $Id: finger.h,v 1.7 1999/09/14 10:51:11 dholland Exp $ */ #include "third_party/musl/passwd.h" #include "libc/runtime/utmp.h" #include "libc/calls/weirdtypes.h" /* * All unique persons are linked in a list headed by "head" and linked * by the "next" field, as well as kept in a hash table. */ typedef struct person { struct person *next; /* link to next person */ struct person *hlink; /* link to next person in hash bucket */ uid_t uid; /* user id */ char *dir; /* user's home directory */ char *homephone; /* pointer to home phone no. */ char *name; /* login name */ char *office; /* pointer to office name */ char *officephone; /* pointer to office phone no.
*/ char *realname; /* pointer to full name */ char *shell; /* user's shell */ time_t mailread; /* last time mail was read */ time_t mailrecv; /* last time mail was received */ struct where *whead, *wtail; /* list of where he is or has been */ } PERSON; enum status { LASTLOG, LOGGEDIN }; typedef struct where { struct where *next; /* next place he is or has been */ enum status info; /* type/status of request */ short writable; /* tty is writable */ time_t loginat; /* time of (last) login */ time_t idletime; /* how long idle (if logged in) */ char tty[UT_LINESIZE+1]; /* null terminated tty line */ char host[UT_HOSTSIZE+1]; /* null terminated remote host name */ } WHERE; extern PERSON *phead, *ptail; /* the linked list of all people */ extern int entries; /* number of people */ #define TBUFLEN 1024 extern char tbuf[TBUFLEN]; /* temp buffer for anybody */ extern time_t now; extern int lflag, pplan; struct utmp; PERSON *enter_person(struct passwd *); PERSON *find_person(const char *name); PERSON *palloc(void); WHERE *walloc(PERSON *); void lflag_print(void); void sflag_print(void); void enter_where(struct utmp *ut, PERSON *pn); void enter_lastlog(PERSON *pn); int match(struct passwd *pw, const char *user); void netfinger(const char *name); const char *prphone(const char *num); #ifndef DAYSPERNYEAR #define DAYSPERNYEAR 365 #endif #ifndef SECSPERDAY #define SECSPERDAY (60 * 60 * 24) #endif /* turn on crnl translation on output */ void set_crmode(void); /* Display, masking control characters and possibly doing crnl translation */ void xputc(int ch); void xputs(const char *buf); int xprintf(const char *fmt, ...); /* Send to stderr, possibly doing crnl translation */ int eprintf(const char *fmt, ...); /* terminal inquiries */ int is8bit(void); int getscreenwidth(void);
4,541
122
jart/cosmopolitan
false
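The declarations above describe two intrusive singly-linked lists: every PERSON is chained through next from phead/ptail, and each person's sessions are WHERE nodes chained from whead/wtail. A minimal sketch of how those links get wired (demo_enter_person and demo_enter_where are hypothetical stand-ins; the real enter_person()/walloc()/enter_where() live elsewhere in the package and also maintain the hlink hash chain):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* trimmed-down stand-ins for the structs declared in finger.h */
enum status { LASTLOG, LOGGEDIN };

typedef struct where {
  struct where *next; /* next place he is or has been */
  enum status info;
  time_t loginat;
  char tty[32];
} WHERE;

typedef struct person {
  struct person *next; /* link to next person */
  char *name;
  WHERE *whead, *wtail;
} PERSON;

static PERSON *phead, *ptail; /* the linked list of all people */

/* append a PERSON to the global list, as enter_person() would */
static PERSON *demo_enter_person(const char *name) {
  PERSON *pn = calloc(1, sizeof(*pn));
  pn->name = strdup(name);
  if (!phead) phead = pn; else ptail->next = pn;
  ptail = pn;
  return pn;
}

/* append a WHERE to one person's whead/wtail chain, as walloc() would */
static WHERE *demo_enter_where(PERSON *pn, const char *tty) {
  WHERE *w = calloc(1, sizeof(*w));
  w->info = LOGGEDIN;
  snprintf(w->tty, sizeof(w->tty), "%s", tty);
  if (!pn->whead) pn->whead = w; else pn->wtail->next = w;
  pn->wtail = w;
  return w;
}

int main(void) {
  PERSON *pn = demo_enter_person("tony");
  demo_enter_where(pn, "tty1");
  demo_enter_where(pn, "ttyp0"); /* one person, two sessions */
  for (pn = phead; pn; pn = pn->next)
    for (WHERE *w = pn->whead; w; w = w->next)
      printf("%s is on %s\n", pn->name, w->tty);
  return 0;
}

This shape is why sflag_print() in sprint.c walks a doubly nested loop: one pass over the PERSON list, one over each person's WHERE chain.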
cosmopolitan/third_party/finger/display.c
/* * Copyright (c) 1989 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * Tony Nardo of the Johns Hopkins University/Applied Physics Lab. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "libc/calls/ioctl.h" #include "libc/calls/struct/termios.h" #include "libc/calls/struct/winsize.h" #include "libc/calls/termios.h" #include "libc/fmt/fmt.h" #include "libc/stdio/stdio.h" #include "libc/str/str.h" #include "libc/sysv/consts/fileno.h" #include "libc/sysv/consts/termios.h" #include "third_party/finger/finger.h" // clang-format off int getscreenwidth(void) { struct winsize ws; if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) < 0 || ws.ws_col==0) { return 80; } return ws.ws_col; } int is8bit(void) { static int cache=-1; struct termios tios; if (cache>=0) return cache; if (tcgetattr(STDIN_FILENO, &tios)<0) { /* assume 8-bit; it's 1999 now, not 1972 */ cache = 1; } else { cache = (tios.c_cflag & CSIZE)==CS8; } return cache; } /************/ static int send_crs=0; void set_crmode(void) { send_crs = 1; } static void fxputc(FILE *f, int ch) { /* drop any sign */ ch = ch&0xff; /* on 7-bit terminals, strip high bit */ if (!is8bit()) ch &= 0x7f; /* * Assume anything that isn't a control character is printable. * We can't count on locale stuff to tell us what's printable * because we might be looking at someone who uses different * locale settings or is on the other side of the planet. So, * strip 0-31, 127, 128-159, and 255. Note that not stripping * 128-159 is asking for trouble, as 155 (M-esc) is interpreted * as esc-[ by most terminals. Hopefully this won't break anyone's * charset. * * It would be nice if we could set the terminal to display in the * right charset, but we have no way to know what it is. feh. 
*/ if (((ch&0x7f) >= 32 && (ch&0x7f) != 0x7f) || ch=='\t') { putc(ch, f); return; } if (ch=='\n') { if (send_crs) putc('\r', f); putc('\n', f); return; } if (ch&0x80) { putc('M', f); putc('-', f); ch &= 0x7f; } putc('^', f); if (ch==0x7f) putc('?', f); else putc(ch+'@', f); } void xputc(int ch) { fxputc(stdout, ch); } static void fxputs(FILE *f, const char *buf) { int i; for (i=0; buf[i]; i++) fxputc(f, buf[i]); } int xprintf(const char *fmt, ...) { char buf[1024]; va_list ap; va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); fxputs(stdout, buf); return strlen(buf); } int eprintf(const char *fmt, ...) { char buf[1024]; va_list ap; va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); fxputs(stderr, buf); return strlen(buf); }
4,397
167
jart/cosmopolitan
false
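The masking rule fxputc() applies above is easiest to check with concrete bytes. A self-contained sketch (demo_mask is this editor's name for it; it mirrors the pass-through/^X/M- logic but leaves out the \n and crnl-translation branches):

#include <stdio.h>

static void demo_mask(int ch) {
  ch &= 0xff;
  /* printable (or tab): pass straight through */
  if (((ch & 0x7f) >= 32 && (ch & 0x7f) != 0x7f) || ch == '\t') {
    putchar(ch);
    return;
  }
  /* meta byte: print M- then mask to the low 7 bits */
  if (ch & 0x80) {
    fputs("M-", stdout);
    ch &= 0x7f;
  }
  /* control byte: caret notation, DEL becomes ^? */
  putchar('^');
  putchar(ch == 0x7f ? '?' : ch + '@');
}

int main(void) {
  int samples[] = {'A', 0x01, 0x7f, 0x9b, 0xff};
  for (int i = 0; i < 5; ++i) {
    demo_mask(samples[i]); /* prints: A ^A ^? M-^[ M-^? */
    putchar(' ');
  }
  putchar('\n');
  return 0;
}

In particular 0x9b, the C1 byte the comment warns about, comes out as the harmless text M-^[ rather than being interpreted by the terminal as esc-[.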
cosmopolitan/third_party/linenoise/LICENSE
Copyright (c) 2010-2014, Salvatore Sanfilippo <antirez at gmail dot com> Copyright (c) 2010-2013, Pieter Noordhuis <pcnoordhuis at gmail dot com> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1,408
26
jart/cosmopolitan
false
cosmopolitan/third_party/linenoise/README.cosmo
DESCRIPTION Cosmopolitan Linenoise is a library for interactive pseudoteletypewriter command sessions using ANSI Standard X3.64 control sequences. ORIGIN https://github.com/antirez/linenoise 97d2850af13c339369093b78abe5265845d78220 Author: antirez <[email protected]> Date: Thu Mar 12 15:51:45 2020 +0100 Use unsigned int instead of uint like rest of code base. DOCUMENTATION See linenoise.c LOCAL CHANGES See linenoise.c
449
21
jart/cosmopolitan
false
cosmopolitan/third_party/linenoise/linenoise.h
#ifndef COSMOPOLITAN_THIRD_PARTY_LINENOISE_LINENOISE_H_ #define COSMOPOLITAN_THIRD_PARTY_LINENOISE_LINENOISE_H_ #include "libc/calls/struct/winsize.h" #include "libc/stdio/stdio.h" #if !(__ASSEMBLER__ + __LINKER__ + 0) COSMOPOLITAN_C_START_ struct linenoiseState; typedef struct linenoiseCompletions { size_t len; char **cvec; } linenoiseCompletions; typedef void(linenoiseCompletionCallback)(const char *, linenoiseCompletions *); typedef char *(linenoiseHintsCallback)(const char *, const char **, const char **); typedef void(linenoiseFreeHintsCallback)(void *); typedef wint_t(linenoiseXlatCallback)(wint_t); void linenoiseSetCompletionCallback(linenoiseCompletionCallback *); void linenoiseSetHintsCallback(linenoiseHintsCallback *); void linenoiseSetFreeHintsCallback(linenoiseFreeHintsCallback *); void linenoiseAddCompletion(linenoiseCompletions *, const char *); void linenoiseSetXlatCallback(linenoiseXlatCallback *); char *linenoise(const char *) dontdiscard; char *linenoiseWithHistory(const char *, const char *) dontdiscard; int linenoiseHistoryAdd(const char *); int linenoiseHistorySave(const char *); int linenoiseHistoryLoad(const char *); void linenoiseFreeCompletions(linenoiseCompletions *); void linenoiseHistoryFree(void); void linenoiseClearScreen(int); void linenoiseMaskModeEnable(void); void linenoiseMaskModeDisable(void); int linenoiseEnableRawMode(int); void linenoiseDisableRawMode(void); void linenoiseFree(void *); int linenoiseIsTerminal(void); int linenoiseIsTeletype(void); char *linenoiseGetHistoryPath(const char *); struct linenoiseState *linenoiseBegin(const char *, int, int); ssize_t linenoiseEdit(struct linenoiseState *, const char *, char **, bool); int linenoiseGetInterrupt(void); void linenoiseEnd(struct linenoiseState *); char *linenoiseGetLine(FILE *); struct winsize linenoiseGetTerminalSize(struct winsize, int, int); void linenoiseRefreshLine(struct linenoiseState *); char *linenoiseRaw(const char *, int, int) dontdiscard; COSMOPOLITAN_C_END_ #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */ #endif /* COSMOPOLITAN_THIRD_PARTY_LINENOISE_LINENOISE_H_ */
2,157
56
jart/cosmopolitan
false
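A minimal caller for the API declared above, patterned on the EXAMPLE block in linenoise.c's header comment (the "demo" program name is hypothetical; per that comment it makes history persist in ~/.demo_history):

#include <stdio.h>
#include <stdlib.h>
#include "third_party/linenoise/linenoise.h"

int main(void) {
  char *line;
  /* read lines with editing + persistent history until EOF (CTRL-D) */
  while ((line = linenoiseWithHistory("IN> ", "demo"))) {
    fputs("OUT> ", stdout);
    fputs(line, stdout);
    fputs("\n", stdout);
    free(line); /* returned buffer is owned by the caller (dontdiscard) */
  }
  return 0;
}

Completion, hints, and keystroke translation are opt-in: register them with linenoiseSetCompletionCallback(), linenoiseSetHintsCallback(), and linenoiseSetXlatCallback() before the loop.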
cosmopolitan/third_party/linenoise/linenoise.mk
#-*-mode:makefile-gmake;indent-tabs-mode:t;tab-width:8;coding:utf-8-*-┐ #───vi: set et ft=make ts=8 tw=8 fenc=utf-8 :vi───────────────────────┘ PKGS += THIRD_PARTY_LINENOISE THIRD_PARTY_LINENOISE_ARTIFACTS += THIRD_PARTY_LINENOISE_A THIRD_PARTY_LINENOISE = $(THIRD_PARTY_LINENOISE_A_DEPS) $(THIRD_PARTY_LINENOISE_A) THIRD_PARTY_LINENOISE_A = o/$(MODE)/third_party/linenoise/linenoise.a THIRD_PARTY_LINENOISE_A_FILES := $(wildcard third_party/linenoise/*) THIRD_PARTY_LINENOISE_A_HDRS = $(filter %.h,$(THIRD_PARTY_LINENOISE_A_FILES)) THIRD_PARTY_LINENOISE_A_SRCS = $(filter %.c,$(THIRD_PARTY_LINENOISE_A_FILES)) THIRD_PARTY_LINENOISE_A_OBJS = $(THIRD_PARTY_LINENOISE_A_SRCS:%.c=o/$(MODE)/%.o) THIRD_PARTY_LINENOISE_A_CHECKS = \ $(THIRD_PARTY_LINENOISE_A).pkg \ $(THIRD_PARTY_LINENOISE_A_HDRS:%=o/$(MODE)/%.ok) THIRD_PARTY_LINENOISE_A_DIRECTDEPS = \ LIBC_CALLS \ LIBC_FMT \ LIBC_INTRIN \ LIBC_NEXGEN32E \ LIBC_MEM \ LIBC_SYSV \ LIBC_SOCK \ LIBC_STDIO \ LIBC_RUNTIME \ LIBC_LOG \ LIBC_SYSV_CALLS \ LIBC_STR \ LIBC_STUBS \ NET_HTTP THIRD_PARTY_LINENOISE_A_DEPS := \ $(call uniq,$(foreach x,$(THIRD_PARTY_LINENOISE_A_DIRECTDEPS),$($(x)))) $(THIRD_PARTY_LINENOISE_A): \ third_party/linenoise/ \ $(THIRD_PARTY_LINENOISE_A).pkg \ $(THIRD_PARTY_LINENOISE_A_OBJS) $(THIRD_PARTY_LINENOISE_A).pkg: \ $(THIRD_PARTY_LINENOISE_A_OBJS) \ $(foreach x,$(THIRD_PARTY_LINENOISE_A_DIRECTDEPS),$($(x)_A).pkg) $(THIRD_PARTY_LINENOISE_A_OBJS): private \ OVERRIDE_CFLAGS += \ -fno-jump-tables \ -ffunction-sections \ -fdata-sections THIRD_PARTY_LINENOISE_LIBS = $(foreach x,$(THIRD_PARTY_LINENOISE_ARTIFACTS),$($(x))) THIRD_PARTY_LINENOISE_SRCS = $(foreach x,$(THIRD_PARTY_LINENOISE_ARTIFACTS),$($(x)_SRCS)) THIRD_PARTY_LINENOISE_HDRS = $(foreach x,$(THIRD_PARTY_LINENOISE_ARTIFACTS),$($(x)_HDRS)) THIRD_PARTY_LINENOISE_CHECKS = $(foreach x,$(THIRD_PARTY_LINENOISE_ARTIFACTS),$($(x)_CHECKS)) THIRD_PARTY_LINENOISE_OBJS = $(foreach x,$(THIRD_PARTY_LINENOISE_ARTIFACTS),$($(x)_OBJS)) $(THIRD_PARTY_LINENOISE_OBJS): third_party/linenoise/linenoise.mk .PHONY: o/$(MODE)/third_party/linenoise o/$(MODE)/third_party/linenoise: $(THIRD_PARTY_LINENOISE_CHECKS)
2,335
62
jart/cosmopolitan
false
cosmopolitan/third_party/linenoise/linenoise.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│ │vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ │ │ Cosmopolitan Linenoise ── guerrilla line editing library against the │ │ notion that a library for user-friendly pseudoteletypewriter command │ │ sessions using ANSI Standard X3.64 control sequences must have 100k+ │ │ lines of GPL workarounds to 300 baud and bygone commercial rivalries │ │ │ │ CHANGES │ │ │ │ - Remove bell │ │ - Add kill ring │ │ - Fix flickering │ │ - Add UTF-8 editing │ │ - Add CTRL-R search │ │ - Support unlimited lines │ │ - React to terminal resizing │ │ - Support terminal flow control │ │ - Make history loading 10x faster │ │ - Make multiline mode the only mode │ │ - Support unlimited input line length │ │ - Restore raw mode on process foregrounding │ │ - Make source code compatible with C++ compilers │ │ - Fix corruption issues by using generalized parsing │ │ - Implement nearly all GNU readline editing shortcuts │ │ - Remove heavyweight dependencies like printf/sprintf │ │ - Remove ISIG→^C→EAGAIN hack and use ephemeral handlers │ │ - Support running on Windows in MinTTY or CMD.EXE on Win10+ │ │ - Support diacritics, русский, Ελληνικά, 中国人, 日本語, 한국인 │ │ │ │ SHORTCUTS │ │ │ │ CTRL-E END │ │ CTRL-A START │ │ CTRL-B BACK │ │ CTRL-F FORWARD │ │ CTRL-L CLEAR │ │ CTRL-H BACKSPACE │ │ CTRL-D DELETE │ │ CTRL-D EOF (IF EMPTY) │ │ CTRL-N NEXT HISTORY │ │ CTRL-P PREVIOUS HISTORY │ │ CTRL-R SEARCH HISTORY │ │ CTRL-G CANCEL SEARCH │ │ ALT-< BEGINNING OF HISTORY │ │ ALT-> END OF HISTORY │ │ ALT-F FORWARD WORD │ │ ALT-B BACKWARD WORD │ │ CTRL-RIGHT FORWARD WORD │ │ CTRL-LEFT BACKWARD WORD │ │ CTRL-ALT-F FORWARD EXPR │ │ CTRL-ALT-B BACKWARD EXPR │ │ ALT-RIGHT FORWARD EXPR │ │ ALT-LEFT BACKWARD EXPR │ │ ALT-SHIFT-B BARF EXPR │ │ ALT-SHIFT-S SLURP EXPR │ │ CTRL-K KILL LINE FORWARDS │ │ CTRL-U KILL LINE BACKWARDS │ │ ALT-H KILL WORD BACKWARDS │ │ CTRL-W KILL WORD BACKWARDS │ │ CTRL-ALT-H KILL WORD BACKWARDS │ │ ALT-D KILL WORD FORWARDS │ │ CTRL-Y YANK │ │ ALT-Y ROTATE KILL RING AND YANK AGAIN │ │ ALT-\ SQUEEZE ADJACENT WHITESPACE │ │ CTRL-T TRANSPOSE │ │ ALT-T TRANSPOSE WORD │ │ ALT-U UPPERCASE WORD │ │ ALT-L LOWERCASE WORD │ │ ALT-C CAPITALIZE WORD │ │ CTRL-C CTRL-C INTERRUPT PROCESS │ │ CTRL-Z SUSPEND PROCESS │ │ CTRL-\ QUIT PROCESS │ │ CTRL-S PAUSE OUTPUT │ │ CTRL-Q UNPAUSE OUTPUT (IF PAUSED) │ │ CTRL-Q ESCAPED INSERT │ │ CTRL-SPACE SET MARK │ │ CTRL-X CTRL-X GOTO MARK │ │ PROTIP REMAP CAPS LOCK TO CTRL │ │ │ │ EXAMPLE │ │ │ │ // should be ~80kb statically linked │ │ // will save history to ~/.foo_history │ │ // cc -fno-jump-tables -Os -o foo foo.c linenoise.c │ │ main() { │ │ char *line; │ │ while ((line = linenoiseWithHistory("IN> ", "foo"))) { │ │ fputs("OUT> ", stdout); │ │ fputs(line, stdout); │ │ fputs("\n", stdout); │ │ free(line); │ │ } │ │ } │ │ │ ╞══════════════════════════════════════════════════════════════════════════════╡ │ │ │ Copyright 2018-2021 Justine Tunney <[email protected]> │ │ Copyright 2010-2016 Salvatore Sanfilippo <[email protected]> │ │ Copyright 2010-2013 Pieter Noordhuis <[email protected]> │ │ │ │ All rights reserved. │ │ │ │ Redistribution and use in source and binary forms, with or without │ │ modification, are permitted provided that the following conditions are │ │ met: │ │ │ │ * Redistributions of source code must retain the above copyright │ │ notice, this list of conditions and the following disclaimer.
│ │ │ │ * Redistributions in binary form must reproduce the above copyright │ │ notice, this list of conditions and the following disclaimer in the │ │ documentation and/or other materials provided with the distribution. │ │ │ │ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS │ │ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT │ │ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR │ │ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT │ │ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, │ │ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT │ │ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, │ │ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY │ │ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT │ │ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE │ │ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. │ │ │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "third_party/linenoise/linenoise.h" #include "libc/assert.h" #include "libc/calls/calls.h" #include "libc/calls/sig.internal.h" #include "libc/calls/struct/sigaction.h" #include "libc/calls/struct/stat.h" #include "libc/calls/termios.h" #include "libc/calls/ttydefaults.h" #include "libc/calls/weirdtypes.h" #include "libc/dce.h" #include "libc/errno.h" #include "libc/fmt/conv.h" #include "libc/intrin/asan.internal.h" #include "libc/intrin/bits.h" #include "libc/intrin/bsr.h" #include "libc/intrin/nomultics.internal.h" #include "libc/intrin/strace.internal.h" #include "libc/intrin/tpenc.h" #include "libc/log/check.h" #include "libc/log/log.h" #include "libc/macros.internal.h" #include "libc/mem/alg.h" #include "libc/mem/mem.h" #include "libc/nexgen32e/rdtsc.h" #include "libc/nt/version.h" #include "libc/runtime/runtime.h" #include "libc/sock/sock.h" #include "libc/sock/struct/pollfd.h" #include "libc/stdio/append.h" #include "libc/stdio/stdio.h" #include "libc/str/str.h" #include "libc/str/tab.internal.h" #include "libc/str/unicode.h" #include "libc/sysv/consts/fileno.h" #include "libc/sysv/consts/map.h" #include "libc/sysv/consts/o.h" #include "libc/sysv/consts/poll.h" #include "libc/sysv/consts/prot.h" #include "libc/sysv/consts/s.h" #include "libc/sysv/consts/sa.h" #include "libc/sysv/consts/sig.h" #include "libc/sysv/consts/termios.h" #include "libc/sysv/errfuns.h" #include "net/http/escape.h" #include "tool/build/lib/case.h" asm(".ident\t\"\\n\\n\ Cosmopolitan Linenoise (BSD-2)\\n\ Copyright 2018-2020 Justine Tunney <[email protected]>\\n\ Copyright 2010-2016 Salvatore Sanfilippo <[email protected]>\\n\ Copyright 2010-2013 Pieter Noordhuis <[email protected]>\""); #define LINENOISE_POLL_MS __SIG_POLLING_INTERVAL_MS #define LINENOISE_MAX_RING 8 #define LINENOISE_MAX_DEBUG 16 #define LINENOISE_MAX_HISTORY 1024 #define LINENOISE_HISTORY_FIRST +LINENOISE_MAX_HISTORY #define LINENOISE_HISTORY_PREV +1 #define LINENOISE_HISTORY_NEXT -1 #define LINENOISE_HISTORY_LAST -LINENOISE_MAX_HISTORY #if 0 #define DEBUG(L, ...) linenoiseDebug(L, __VA_ARGS__) #else #define DEBUG(L, ...) 
(void)0 #endif #define DUFF_ROUTINE_LOOP 0 #define DUFF_ROUTINE_SEARCH 1 #define DUFF_ROUTINE_START 5 #define DUFF_ROUTINE_LABEL(STATE) \ case STATE: \ linenoiseRefreshLineForce(l); \ l->state = STATE #define DUFF_ROUTINE_READ(STATE) \ DUFF_ROUTINE_LABEL(STATE); \ rc = linenoiseRead(l->ifd, seq, sizeof(seq), l, block); \ if (rc == -1 && errno == EAGAIN) { \ l->state = STATE; \ return -1; \ } #define BLOCKING_READ() rc = linenoiseRead(l->ifd, seq, sizeof(seq), l, false) struct abuf { char *b; int len; int cap; }; struct rune { unsigned c; unsigned n; }; struct linenoiseRing { unsigned i; char *p[LINENOISE_MAX_RING]; }; struct linenoiseState { int state; /* state machine */ int ifd; /* terminal stdin file descriptor */ int ofd; /* terminal stdout file descriptor */ struct winsize ws; /* rows and columns in terminal */ char *buf; /* edited line buffer */ const char *prompt; /* prompt to display */ int hindex; /* history index */ int rows; /* rows being used */ int oldpos; /* previous refresh cursor position */ unsigned debugrow; /* row for debug display */ unsigned buflen; /* edited line buffer size */ unsigned pos; /* current buffer index */ unsigned len; /* current edited line length */ unsigned mark; /* saved cursor position */ unsigned yi, yj; /* boundaries of last yank */ char seq[2][16]; /* keystroke history for yanking code */ char final; /* set to true on last update */ char dirty; /* if an update was squashed */ linenoiseCompletions lc; struct abuf ab; int i, j, perline, itemlen; // for reverse search int fail, matlen, oldindex, olderpos; const char *oldprompt; }; static const unsigned short kMirrorLeft[][2] = { {L'(', L')'}, {L'[', L']'}, {L'{', L'}'}, {L'⁅', L'⁆'}, {L'⁽', L'⁾'}, {L'₍', L'₎'}, {L'⌈', L'⌉'}, {L'⌊', L'⌋'}, {L'〈', L'〉'}, {L'❨', L'❩'}, {L'❪', L'❫'}, {L'❬', L'❭'}, {L'❮', L'❯'}, {L'❰', L'❱'}, {L'❲', L'❳'}, {L'❴', L'❵'}, {L'⟅', L'⟆'}, {L'⟦', L'⟧'}, {L'⟨', L'⟩'}, {L'⟪', L'⟫'}, {L'⟬', L'⟭'}, {L'⟮', L'⟯'}, {L'⦃', L'⦄'}, {L'⦅', L'⦆'}, {L'⦇', L'⦈'}, {L'⦉', L'⦊'}, {L'⦋', L'⦌'}, {L'⦍', L'⦐'}, {L'⦏', L'⦎'}, {L'⦑', L'⦒'}, {L'⦓', L'⦔'}, {L'⦗', L'⦘'}, {L'⧘', L'⧙'}, {L'⧚', L'⧛'}, {L'⧼', L'⧽'}, {L'﹙', L'﹚'}, {L'﹛', L'﹜'}, {L'﹝', L'﹞'}, {L'（', L'）'}, {L'［', L'］'}, {L'｛', L'｝'}, {L'｢', L'｣'}, }; static const unsigned short kMirrorRight[][2] = { {L')', L'('}, {L']', L'['}, {L'}', L'{'}, {L'⁆', L'⁅'}, {L'⁾', L'⁽'}, {L'₎', L'₍'}, {L'⌉', L'⌈'}, {L'⌋', L'⌊'}, {L'〉', L'〈'}, {L'❩', L'❨'}, {L'❫', L'❪'}, {L'❭', L'❬'}, {L'❯', L'❮'}, {L'❱', L'❰'}, {L'❳', L'❲'}, {L'❵', L'❴'}, {L'⟆', L'⟅'}, {L'⟧', L'⟦'}, {L'⟩', L'⟨'}, {L'⟫', L'⟪'}, {L'⟭', L'⟬'}, {L'⟯', L'⟮'}, {L'⦄', L'⦃'}, {L'⦆', L'⦅'}, {L'⦈', L'⦇'}, {L'⦊', L'⦉'}, {L'⦌', L'⦋'}, {L'⦎', L'⦏'}, {L'⦐', L'⦍'}, {L'⦒', L'⦑'}, {L'⦔', L'⦓'}, {L'⦘', L'⦗'}, {L'⧙', L'⧘'}, {L'⧛', L'⧚'}, {L'⧽', L'⧼'}, {L'﹚', L'﹙'}, {L'﹜', L'﹛'}, {L'﹞', L'﹝'}, {L'）', L'（'}, {L'］', L'［'}, {L'｝', L'｛'}, {L'｣', L'｢'}, }; static const char *const kUnsupported[] = {"dumb", "cons25", "emacs"}; static int gotint; static int gotcont; static int gotwinch; static char maskmode; static char ispaused; static char iscapital; static int historylen; static signed char rawmode = -1; static struct linenoiseRing ring; static struct sigaction orig_int; static struct sigaction orig_quit; static struct sigaction orig_cont; static struct sigaction orig_winch; static struct termios orig_termios; static char *history[LINENOISE_MAX_HISTORY]; static linenoiseXlatCallback *xlatCallback; static linenoiseHintsCallback *hintsCallback; static linenoiseFreeHintsCallback *freeHintsCallback;
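/* (Editor's illustration, not part of the original file.) Both tables
   above are sorted on their first column, which is what lets GetMirror()
   below locate an entry with a plain binary search, e.g.:
     GetMirrorRight(L'(') == L')'   -- looked up in kMirrorLeft
     GetMirrorLeft(L')')  == L'('   -- looked up in kMirrorRight
     GetMirrorRight(L'a') == 0      -- not a bracket, so no mirror
   linenoiseMirrorLeft()/linenoiseMirrorRight() further down walk the
   buffer with these lookups to find the matching bracket around the
   cursor, which drives the bold highlighting in the refresh code. */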
static linenoiseCompletionCallback *completionCallback; static unsigned GetMirror(const unsigned short A[][2], size_t n, unsigned c) { int l, m, r; l = 0; r = n - 1; while (l <= r) { m = (l + r) >> 1; if (A[m][0] < c) { l = m + 1; } else if (A[m][0] > c) { r = m - 1; } else { return A[m][1]; } } return 0; } static unsigned GetMirrorLeft(unsigned c) { return GetMirror(kMirrorRight, ARRAYLEN(kMirrorRight), c); } static unsigned GetMirrorRight(unsigned c) { return GetMirror(kMirrorLeft, ARRAYLEN(kMirrorLeft), c); } static int isxseparator(wint_t c) { return iswseparator(c) && !GetMirrorLeft(c) && !GetMirrorRight(c); } static int notwseparator(wint_t c) { return !iswseparator(c); } static int iswname(wint_t c) { return !iswseparator(c) || c == '_' || c == '-' || c == '.' || c == ':'; } static int notwname(wint_t c) { return !iswname(c); } static void linenoiseOnInt(int sig) { gotint = sig; } static void linenoiseOnCont(int sig) { gotcont = sig; } static void linenoiseOnWinch(int sig) { gotwinch = sig; } static wint_t Capitalize(wint_t c) { if (!iscapital) { c = towupper(c); iscapital = 1; } return c; } static struct rune DecodeUtf8(int c) { struct rune r; if (c < 252) { r.n = _bsr(255 & ~c); r.c = c & (((1 << r.n) - 1) | 3); r.n = 6 - r.n; } else { r.c = c & 3; r.n = 5; } return r; } static struct rune GetUtf8(const char *p, size_t n) { struct rune r; if ((r.n = r.c = 0) < n && (r.c = p[r.n++] & 255) >= 0300) { r.c = DecodeUtf8(r.c).c; while (r.n < n && (p[r.n] & 0300) == 0200) { r.c = r.c << 6 | p[r.n++] & 077; } } return r; } static size_t GetFdSize(int fd) { struct stat st; st.st_size = 0; fstat(fd, &st); return st.st_size; } static char IsCharDev(int fd) { struct stat st; st.st_mode = 0; fstat(fd, &st); return (st.st_mode & S_IFMT) == S_IFCHR; } static int linenoiseIsUnsupportedTerm(void) { int i; char *term; static char once, res; if (!once) { if (IsWindows() && !IsAtLeastWindows10()) { res = 1; } else if ((term = getenv("TERM"))) { for (i = 0; i < sizeof(kUnsupported) / sizeof(*kUnsupported); i++) { if (!strcasecmp(term, kUnsupported[i])) { res = 1; break; } } } once = 1; } return res; } int linenoiseIsTerminal(void) { static int once, res; if (!once) { res = isatty(fileno(stdin)) && isatty(fileno(stdout)) && !linenoiseIsUnsupportedTerm(); once = 1; } return res; } int linenoiseIsTeletype(void) { static int once, res; if (!once) { res = linenoiseIsTerminal() || (IsCharDev(fileno(stdin)) && IsCharDev(fileno(stdout))); once = 1; } return res; } char *linenoiseGetLine(FILE *f) { ssize_t rc; char *p = 0; size_t n, c = 0; if ((rc = getdelim(&p, &c, '\n', f)) != EOF) { for (n = rc; n; --n) { if (p[n - 1] == '\r' || p[n - 1] == '\n') { p[n - 1] = 0; } else { break; } } return p; } else { free(p); return 0; } } static const char *FindSubstringReverse(const char *p, size_t n, const char *q, size_t m) { size_t i; if (m <= n) { n -= m; do { for (i = 0; i < m; ++i) { if (kToLower[p[n + i] & 255] != kToLower[q[i] & 255]) { break; } } if (i == m) { return p + n; } } while (n--); } return 0; } static int ParseUnsigned(const char *s, void *e) { int c, x; for (x = 0; (c = *s++);) { if ('0' <= c && c <= '9') { x = MIN(c - '0' + x * 10, 32767); } else { break; } } if (e) *(const char **)e = s; return x; } static char *FormatUnsigned(char *p, unsigned x) { char t; size_t i, a, b; i = 0; do { p[i++] = x % 10 + '0'; x = x / 10; } while (x > 0); p[i] = '\0'; if (i) { for (a = 0, b = i - 1; a < b; ++a, --b) { t = p[a]; p[a] = p[b]; p[b] = t; } } return p + i; } static char
HasPendingInput(int fd) { return poll((struct pollfd[]){{fd, POLLIN}}, 1, 0) == 1; } /** * Returns UNICODE CJK Monospace Width of string. * * Control codes and ANSI sequences have a width of zero. We only parse * a limited subset of ANSI here since we don't store ANSI codes in the * linenoiseState::buf, but we do encourage CSI color codes in prompts. */ static size_t GetMonospaceWidth(const char *p, size_t n, char *out_haswides) { int c, d; size_t i, w; struct rune r; char haswides; enum { kAscii, kUtf8, kEsc, kCsi1, kCsi2 } t; for (haswides = r.c = r.n = t = w = i = 0; i < n; ++i) { c = p[i] & 255; switch (t) { Whoopsie: t = kAscii; /* fallthrough */ case kAscii: if (c < 0200) { if (c == 033) { t = kEsc; } else { ++w; } } else if (c >= 0300) { t = kUtf8; r = DecodeUtf8(c); } break; case kUtf8: if ((c & 0300) == 0200) { r.c <<= 6; r.c |= c & 077; if (!--r.n) { d = wcwidth(r.c); d = MAX(0, d); w += d; haswides |= d > 1; t = kAscii; } } else { goto Whoopsie; } break; case kEsc: if (c == '[') { t = kCsi1; } else { t = kAscii; } break; case kCsi1: if (0x20 <= c && c <= 0x2f) { t = kCsi2; } else if (0x40 <= c && c <= 0x7e) { t = kAscii; } else if (!(0x30 <= c && c <= 0x3f)) { goto Whoopsie; } break; case kCsi2: if (0x40 <= c && c <= 0x7e) { t = kAscii; } else if (!(0x20 <= c && c <= 0x2f)) { goto Whoopsie; } break; default: unreachable; } } if (out_haswides) { *out_haswides = haswides; } return w; } static void abInit(struct abuf *a) { a->len = 0; a->cap = 16; a->b = malloc(a->cap); a->b[0] = 0; } static char abGrow(struct abuf *a, int need) { int cap; char *b; cap = a->cap; do { cap += cap / 2; } while (cap < need); if (!(b = realloc(a->b, cap * sizeof(*a->b)))) return 0; a->cap = cap; a->b = b; return 1; } static void abAppend(struct abuf *a, const char *s, int len) { if (a->len + len + 1 > a->cap && !abGrow(a, a->len + len + 1)) return; memcpy(a->b + a->len, s, len); a->b[a->len + len] = 0; a->len += len; } static void abAppends(struct abuf *a, const char *s) { abAppend(a, s, strlen(s)); } static void abAppendu(struct abuf *a, unsigned u) { char b[11]; abAppend(a, b, FormatUnsigned(b, u) - b); } static void abAppendw(struct abuf *a, unsigned long long w) { char *p; if (a->len + 8 + 1 > a->cap && !abGrow(a, a->len + 8 + 1)) return; p = a->b + a->len; p[0] = (0x00000000000000ff & w) >> 000; p[1] = (0x000000000000ff00 & w) >> 010; p[2] = (0x0000000000ff0000 & w) >> 020; p[3] = (0x00000000ff000000 & w) >> 030; p[4] = (0x000000ff00000000 & w) >> 040; p[5] = (0x0000ff0000000000 & w) >> 050; p[6] = (0x00ff000000000000 & w) >> 060; p[7] = (0xff00000000000000 & w) >> 070; a->len += w ? 
(_bsrll(w) >> 3) + 1 : 1; p[8] = 0; } static void abFree(struct abuf *a) { free(a->b); } static void linenoiseUnpause(int fd) { if (ispaused) { tcflow(fd, TCOON); ispaused = 0; } } int linenoiseEnableRawMode(int fd) { struct termios raw; struct sigaction sa; if (rawmode == -1) { if (tcgetattr(fd, &orig_termios) != -1) { raw = orig_termios; raw.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON); raw.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG); raw.c_oflag |= OPOST | ONLCR; raw.c_iflag |= IUTF8; raw.c_cflag |= CS8; raw.c_cc[VMIN] = 1; raw.c_cc[VTIME] = 0; if (tcsetattr(fd, TCSANOW, &raw) != -1) { sa.sa_flags = 0; sa.sa_handler = linenoiseOnCont; sigemptyset(&sa.sa_mask); sigaction(SIGCONT, &sa, &orig_cont); sa.sa_handler = linenoiseOnWinch; sigaction(SIGWINCH, &sa, &orig_winch); rawmode = fd; gotwinch = 0; gotcont = 0; return 0; } } return enotty(); } else { return 0; } } void linenoiseDisableRawMode(void) { if (rawmode != -1) { linenoiseUnpause(rawmode); sigaction(SIGCONT, &orig_cont, 0); sigaction(SIGWINCH, &orig_winch, 0); tcsetattr(rawmode, TCSANOW, &orig_termios); rawmode = -1; } } static int linenoiseWrite(int fd, const void *p, size_t n) { ssize_t rc; size_t wrote; do { for (;;) { if (ispaused) { return 0; } rc = write(fd, p, n); if (rc == -1 && errno == EINTR) { continue; } else { break; } } if (rc != -1) { wrote = rc; n -= wrote; p = (char *)p + wrote; } else { return -1; } } while (n); return 0; } static int linenoiseWriteStr(int fd, const char *p) { return linenoiseWrite(fd, p, strlen(p)); } static void linenoiseDebug(struct linenoiseState *l, const char *fmt, ...) { va_list va; char *msg = 0; char *ansi = 0; int x, y, n, dy, xn; va_start(va, fmt); (vappendf)(&msg, fmt, va); va_end(va); xn = l->ws.ws_col; xn = MAX(xn, 80); y = l->debugrow; n = GetMonospaceWidth(msg, strlen(msg), 0); x = MAX(xn - n, 0); (appendf)(&ansi, "\e7\e[%u;%uH\e[1K%s\e8", y + 1, x + 1, msg); linenoiseWrite(l->ofd, ansi, appendz(ansi).i); y = (y + (n + (xn - 1)) / xn) % LINENOISE_MAX_DEBUG; l->debugrow = y; free(ansi); free(msg); } static int linenoisePoll(struct linenoiseState *l, int fd) { int rc; if ((rc = poll((struct pollfd[]){{fd, POLLIN}}, 1, 0))) { return rc; } else { l->dirty = true; return eagain(); } } static ssize_t linenoiseRead(int fd, char *buf, size_t size, struct linenoiseState *l, int block) { ssize_t rc; int refreshme; for (;;) { refreshme = 0; if (gotint) { errno = EINTR; return -1; } if (gotcont && rawmode != -1) { rawmode = -1; strace_enabled(-1); linenoiseEnableRawMode(0); strace_enabled(+1); if (l) refreshme = 1; } if (l && gotwinch) refreshme = 1; if (refreshme) linenoiseRefreshLine(l); if (!block && linenoisePoll(l, fd) == -1) return -1; strace_enabled(-1); rc = readansi(fd, buf, size); strace_enabled(+1); if (rc == -1 && errno == EINTR) { if (!block) break; } else { break; } } if (l && rc > 0) { memcpy(l->seq[1], l->seq[0], sizeof(l->seq[0])); memset(l->seq[0], 0, sizeof(l->seq[0])); memcpy(l->seq[0], buf, MIN(MIN(size, rc), sizeof(l->seq[0]) - 1)); } return rc; } /** * Returns number of columns in current terminal. * * 1. Checks COLUMNS environment variable (set by Emacs) * 2. Tries asking termios (works for pseudoteletypewriters) * 3. Falls back to inband signalling (works w/ pipe or serial) * 4. 
Otherwise we conservatively assume 80 columns * * @param ws should be initialized by caller to zero before first call * @param ifd is input file descriptor * @param ofd is output file descriptor * @return window size */ struct winsize linenoiseGetTerminalSize(struct winsize ws, int ifd, int ofd) { int x; ssize_t n; char *p, *s, b[16]; ioctl(ofd, TIOCGWINSZ, &ws); if ((!ws.ws_row && (s = getenv("ROWS")) && (x = ParseUnsigned(s, 0)))) { ws.ws_row = x; } if ((!ws.ws_col && (s = getenv("COLUMNS")) && (x = ParseUnsigned(s, 0)))) { ws.ws_col = x; } if (((!ws.ws_col || !ws.ws_row) && linenoiseRead(ifd, 0, 0, 0, 1) != -1 && linenoiseWriteStr( ofd, "\e7" /* save position */ "\e[9979;9979H" /* move cursor to bottom right corner */ "\e[6n" /* report position */ "\e8") != -1 && /* restore position */ (n = linenoiseRead(ifd, b, sizeof(b), 0, 1)) != -1 && n && b[0] == 033 && b[1] == '[' && b[n - 1] == 'R')) { p = b + 2; if ((x = ParseUnsigned(p, &p))) ws.ws_row = x; if (*p++ == ';' && (x = ParseUnsigned(p, 0))) ws.ws_col = x; } if (!ws.ws_col) ws.ws_col = 80; if (!ws.ws_row) ws.ws_row = 24; return ws; } /* Clear the screen. Used to handle ctrl+l */ void linenoiseClearScreen(int fd) { linenoiseWriteStr(fd, "\e[H" /* move cursor to top left corner */ "\e[2J"); /* erase display */ } static void linenoiseBeep(void) { // THE TERMINAL BELL IS DEAD - HISTORY HAS KILLED IT } static char linenoiseGrow(struct linenoiseState *ls, size_t n) { char *p; size_t m; m = ls->buflen; if (m >= n) return 1; do { m += m >> 1; } while (m < n); if (!(p = realloc(ls->buf, m * sizeof(*ls->buf)))) return 0; ls->buf = p; ls->buflen = m; return 1; } static wint_t ScrubCompletionCharacter(wint_t c) { if ((0x00 <= c && c <= 0x1F) || c == 0x7F) { return kCp437[c]; } else { return c; } } static size_t linenoiseMaxCompletionWidth(linenoiseCompletions *lc) { size_t i, n, m; for (m = i = 0; i < lc->len; ++i) { n = GetMonospaceWidth(lc->cvec[i], strlen(lc->cvec[i]), 0); m = MAX(n, m); } return m; } static size_t Forward(struct linenoiseState *l, size_t pos) { return pos + GetUtf8(l->buf + pos, l->len - pos).n; } static size_t Backward(struct linenoiseState *l, size_t pos) { if (pos) { do { --pos; } while (pos && (l->buf[pos] & 0300) == 0200); } return pos; } static size_t Backwards(struct linenoiseState *l, size_t pos, int pred(wint_t)) { size_t i; struct rune r; while (pos) { i = Backward(l, pos); r = GetUtf8(l->buf + i, l->len - i); if (pred(r.c)) { pos = i; } else { break; } } return pos; } static size_t Forwards(struct linenoiseState *l, size_t pos, int pred(wint_t)) { struct rune r; while (pos < l->len) { r = GetUtf8(l->buf + pos, l->len - pos); if (pred(r.c)) { pos += r.n; } else { break; } } return pos; } static size_t GetCommonPrefixLength(struct linenoiseCompletions *lc) { struct rune r; int i, j, n, c; i = 0; for (n = -1, i = 0; i < lc->len; ++i) { if (n != -1) { n = strnlen(lc->cvec[i], n); } else { n = strlen(lc->cvec[i]); } } for (i = 0, r.n = 0; i < n; i += r.n) { for (c = -1, j = 0; j < lc->len; ++j) { r = GetUtf8(lc->cvec[j] + i, n - i); if (c != -1) { if (r.c != c) { goto Finished; } } else { c = r.c; } } } Finished: return i; } static void linenoiseEditHistoryGoto(struct linenoiseState *l, int i) { size_t n; if (historylen <= 1) return; i = MAX(MIN(i, historylen - 1), 0); free(history[historylen - 1 - l->hindex]); history[historylen - 1 - l->hindex] = strdup(l->buf); l->hindex = i; n = strlen(history[historylen - 1 - l->hindex]); linenoiseGrow(l, n + 1); n = MIN(n, l->buflen - 1); memcpy(l->buf, history[historylen - 1 - 
l->hindex], n); l->buf[n] = 0; l->len = l->pos = n; linenoiseRefreshLine(l); } static void linenoiseEditHistoryMove(struct linenoiseState *l, int dx) { linenoiseEditHistoryGoto(l, l->hindex + dx); } static char *linenoiseMakeSearchPrompt(int fail, const char *s, int n) { struct abuf ab; abInit(&ab); abAppendw(&ab, '('); if (fail) abAppends(&ab, "failed "); abAppends(&ab, "reverse-i-search `\e[4m"); abAppend(&ab, s, n); abAppends(&ab, "\e[24m"); abAppends(&ab, s + n); abAppendw(&ab, READ32LE("') ")); return ab.b; } static void linenoiseRingFree(void) { size_t i; for (i = 0; i < LINENOISE_MAX_RING; ++i) { if (ring.p[i]) { free(ring.p[i]); ring.p[i] = 0; } } } static void linenoiseRingPush(const char *p, size_t n) { char *q; if (LINENOISE_MAX_RING && n) { if ((q = malloc(n + 1))) { ring.i = (ring.i + 1) % LINENOISE_MAX_RING; free(ring.p[ring.i]); ring.p[ring.i] = memcpy(q, p, n); ring.p[ring.i][n] = 0; } } } static void linenoiseRingRotate(void) { size_t i; for (i = 0; i < LINENOISE_MAX_RING; ++i) { ring.i = (ring.i - 1) % LINENOISE_MAX_RING; if (ring.p[ring.i]) { break; } } } static char *linenoiseRefreshHints(struct linenoiseState *l) { char *hint; struct abuf ab; const char *ansi1, *ansi2; if (!hintsCallback) return 0; if (!(hint = hintsCallback(l->buf, &ansi1, &ansi2))) return 0; abInit(&ab); ansi1 = "\e[90m"; ansi2 = "\e[39m"; if (ansi1) abAppends(&ab, ansi1); abAppends(&ab, hint); if (ansi2) abAppends(&ab, ansi2); if (freeHintsCallback) freeHintsCallback(hint); return ab.b; } static int linenoiseMirrorLeft(struct linenoiseState *l, unsigned res[2]) { unsigned c, pos, left, right, depth, index; if ((pos = Backward(l, l->pos))) { right = GetUtf8(l->buf + pos, l->len - pos).c; if ((left = GetMirrorLeft(right))) { depth = 0; index = pos; do { pos = Backward(l, pos); c = GetUtf8(l->buf + pos, l->len - pos).c; if (c == right) { ++depth; } else if (c == left) { if (depth) { --depth; } else { res[0] = pos; res[1] = index; return 0; } } } while (pos); } } return -1; } static int linenoiseMirrorRight(struct linenoiseState *l, unsigned res[2]) { struct rune rune; unsigned pos, left, right, depth, index; pos = l->pos; rune = GetUtf8(l->buf + pos, l->len - pos); left = rune.c; if ((right = GetMirrorRight(left))) { depth = 0; index = pos; do { pos += rune.n; rune = GetUtf8(l->buf + pos, l->len - pos); if (rune.c == left) { ++depth; } else if (rune.c == right) { if (depth) { --depth; } else { res[0] = index; res[1] = pos; return 0; } } } while (pos + rune.n < l->len); } return -1; } static int linenoiseMirror(struct linenoiseState *l, unsigned res[2]) { int rc; rc = linenoiseMirrorLeft(l, res); if (rc == -1) rc = linenoiseMirrorRight(l, res); return rc; } static void linenoiseRefreshLineImpl(struct linenoiseState *l, int force, const char *prefix) { char *hint; char flipit; char hasflip; char haswides; struct abuf ab; struct rune rune; unsigned flip[2]; const char *p, *buf; struct winsize oldsize; int i, x, y, t, xn, yn, cx, cy, tn, resized; int fd, plen, width, pwidth, rows, len, pos; /* * synchronize the i/o state */ if (ispaused) { if (force) { linenoiseUnpause(l->ofd); } else { return; } } if (!force && HasPendingInput(l->ifd)) { l->dirty = 1; return; } oldsize = l->ws; if ((resized = gotwinch) && rawmode != -1) { gotwinch = 0; l->ws = linenoiseGetTerminalSize(l->ws, l->ifd, l->ofd); } hasflip = !l->final && !linenoiseMirror(l, flip); StartOver: fd = l->ofd; buf = l->buf; pos = l->pos; len = l->len; xn = l->ws.ws_col; yn = l->ws.ws_row; plen = strlen(l->prompt); pwidth =
GetMonospaceWidth(l->prompt, plen, 0); width = GetMonospaceWidth(buf, len, &haswides); /* * handle the case where the line is larger than the whole display * gnu readline actually isn't able to deal with this situation!!! * we kludge xn to address the edge case of wide chars on the edge */ for (tn = xn - haswides;;) { if (pwidth + width + 1 < tn * yn) break; /* we're fine */ if (!len || width < 2) break; /* we can't do anything */ if (pwidth + 2 > tn * yn) break; /* we can't do anything */ if (pos > len / 2) { /* hide content on the left if we're editing on the right */ rune = GetUtf8(buf, len); buf += rune.n; len -= rune.n; pos -= rune.n; } else { /* hide content on the right if we're editing on left */ t = len; while (len && (buf[len - 1] & 0300) == 0200) --len; if (len) --len; rune = GetUtf8(buf + len, t - len); } if ((t = wcwidth(rune.c)) > 0) { width -= t; } } pos = MAX(0, MIN(pos, len)); /* * now generate the terminal codes to update the line * * since we support unlimited lines it's important that we don't * clear the screen before we draw the screen. doing that causes * flickering. the key with terminals is to overwrite cells, and * then use \e[K and \e[J to clear everything else. * * we make the assumption that prompts and hints may contain ansi * sequences, but the buffer does not. * * we need to handle the edge case where a wide character like 度 * might be at the edge of the window, when there's one cell left. * so we can't use division based on string width to compute the * coordinates and have to track it as we go. */ cy = -1; cx = -1; rows = 1; abInit(&ab); if (prefix) { // to prevent flicker with ctrl+l abAppends(&ab, prefix); } abAppendw(&ab, '\r'); /* start of line */ if (l->rows - l->oldpos - 1 > 0) { abAppends(&ab, "\e["); abAppendu(&ab, l->rows - l->oldpos - 1); abAppendw(&ab, 'A'); /* cursor up clamped */ } abAppends(&ab, l->prompt); x = pwidth; for (i = 0; i < len; i += rune.n) { rune = GetUtf8(buf + i, len - i); if (x && x + rune.n > xn) { if (cy >= 0) ++cy; if (x < xn) { abAppends(&ab, "\e[K"); /* clear line forward */ } abAppends(&ab, "\r" /* start of line */ "\n"); /* cursor down unclamped */ ++rows; x = 0; } if (i == pos) { cy = 0; cx = x; } if (maskmode) { abAppendw(&ab, '*'); } else { flipit = hasflip && (i == flip[0] || i == flip[1]); if (flipit) abAppendw(&ab, READ32LE("\e[1m")); abAppendw(&ab, _tpenc(rune.c)); if (flipit) abAppendw(&ab, READ64LE("\e[22m\0\0")); } t = wcwidth(rune.c); t = MAX(0, t); x += t; } if (!l->final && (hint = linenoiseRefreshHints(l))) { if (GetMonospaceWidth(hint, strlen(hint), 0) < xn - x) { if (cx < 0) { cx = x; } abAppends(&ab, hint); } free(hint); } abAppendw(&ab, READ32LE("\e[J")); /* erase display forwards */ /* * if we are at the very end of the screen with our prompt, we need to * emit a newline and move the prompt to the first column. 
*/ if (pos && pos == len && x >= xn) { abAppendw(&ab, READ32LE("\n\r\0")); ++rows; } /* * move cursor to right position */ if (cy > 0) { abAppendw(&ab, READ32LE("\e[\0")); abAppendu(&ab, cy); abAppendw(&ab, 'A'); /* cursor up */ } if (cx > 0) { abAppendw(&ab, READ32LE("\r\e[")); abAppendu(&ab, cx); abAppendw(&ab, 'C'); /* cursor right */ } else if (!cx) { abAppendw(&ab, '\r'); /* start */ } /* * now get ready to progress state * we use a mostly correct kludge when the tty resizes */ l->rows = rows; if (resized && oldsize.ws_col > l->ws.ws_col) { resized = 0; abFree(&ab); goto StartOver; } l->oldpos = MAX(0, cy); l->dirty = 0; /* * send codes to terminal */ linenoiseWrite(fd, ab.b, ab.len); abFree(&ab); } void linenoiseRefreshLine(struct linenoiseState *l) { strace_enabled(-1); linenoiseRefreshLineImpl(l, 0, 0); strace_enabled(+1); } static void linenoiseRefreshLineForce(struct linenoiseState *l) { strace_enabled(-1); linenoiseRefreshLineImpl(l, 1, 0); strace_enabled(+1); } static void linenoiseEditInsert(struct linenoiseState *l, const char *p, size_t n) { if (linenoiseGrow(l, l->len + n + 1)) { memmove(l->buf + l->pos + n, l->buf + l->pos, l->len - l->pos); memcpy(l->buf + l->pos, p, n); l->pos += n; l->len += n; l->buf[l->len] = 0; linenoiseRefreshLine(l); } } static void linenoiseEditHome(struct linenoiseState *l) { l->pos = 0; linenoiseRefreshLine(l); } static void linenoiseEditEnd(struct linenoiseState *l) { l->pos = l->len; linenoiseRefreshLine(l); } static void linenoiseEditUp(struct linenoiseState *l) { linenoiseEditHistoryMove(l, LINENOISE_HISTORY_PREV); } static void linenoiseEditDown(struct linenoiseState *l) { linenoiseEditHistoryMove(l, LINENOISE_HISTORY_NEXT); } static void linenoiseEditBof(struct linenoiseState *l) { linenoiseEditHistoryMove(l, LINENOISE_HISTORY_FIRST); } static void linenoiseEditEof(struct linenoiseState *l) { linenoiseEditHistoryMove(l, LINENOISE_HISTORY_LAST); } static void linenoiseEditRefresh(struct linenoiseState *l) { strace_enabled(-1); linenoiseRefreshLineImpl(l, 1, "\e[H" // move cursor to top left corner "\e[2J"); // erase display strace_enabled(+1); } static size_t ForwardWord(struct linenoiseState *l, size_t pos) { pos = Forwards(l, pos, iswseparator); pos = Forwards(l, pos, notwseparator); return pos; } static size_t BackwardWord(struct linenoiseState *l, size_t pos) { pos = Backwards(l, pos, iswseparator); pos = Backwards(l, pos, notwseparator); return pos; } static size_t EscapeWord(struct linenoiseState *l) { size_t i, j; struct rune r; for (i = l->pos; i && i < l->len; i += r.n) { if (i < l->len) { r = GetUtf8(l->buf + i, l->len - i); if (iswseparator(r.c)) break; } if ((j = i)) { do { --j; } while (j && (l->buf[j] & 0300) == 0200); r = GetUtf8(l->buf + j, l->len - j); if (iswseparator(r.c)) break; } } return i; } static void linenoiseEditLeft(struct linenoiseState *l) { l->pos = Backward(l, l->pos); linenoiseRefreshLine(l); } static void linenoiseEditRight(struct linenoiseState *l) { if (l->pos == l->len) return; do { l->pos++; } while (l->pos < l->len && (l->buf[l->pos] & 0300) == 0200); linenoiseRefreshLine(l); } static void linenoiseEditLeftWord(struct linenoiseState *l) { l->pos = BackwardWord(l, l->pos); linenoiseRefreshLine(l); } static void linenoiseEditRightWord(struct linenoiseState *l) { l->pos = ForwardWord(l, l->pos); linenoiseRefreshLine(l); } static void linenoiseEditLeftExpr(struct linenoiseState *l) { unsigned mark[2]; l->pos = Backwards(l, l->pos, isxseparator); if (!linenoiseMirrorLeft(l, mark)) { l->pos = mark[0]; } else 
{ l->pos = Backwards(l, l->pos, notwseparator); } linenoiseRefreshLine(l); } static void linenoiseEditRightExpr(struct linenoiseState *l) { unsigned mark[2]; l->pos = Forwards(l, l->pos, isxseparator); if (!linenoiseMirrorRight(l, mark)) { l->pos = Forward(l, mark[1]); } else { l->pos = Forwards(l, l->pos, notwseparator); } linenoiseRefreshLine(l); } static void linenoiseEditDelete(struct linenoiseState *l) { size_t i; if (l->pos == l->len) return; i = Forward(l, l->pos); memmove(l->buf + l->pos, l->buf + i, l->len - i + 1); l->len -= i - l->pos; linenoiseRefreshLine(l); } static void linenoiseEditRubout(struct linenoiseState *l) { size_t i; if (!l->pos) return; i = Backward(l, l->pos); memmove(l->buf + i, l->buf + l->pos, l->len - l->pos + 1); l->len -= l->pos - i; l->pos = i; linenoiseRefreshLine(l); } static void linenoiseEditDeleteWord(struct linenoiseState *l) { size_t i; if (l->pos == l->len) return; i = ForwardWord(l, l->pos); linenoiseRingPush(l->buf + l->pos, i - l->pos); memmove(l->buf + l->pos, l->buf + i, l->len - i + 1); l->len -= i - l->pos; linenoiseRefreshLine(l); } static void linenoiseEditRuboutWord(struct linenoiseState *l) { size_t i; if (!l->pos) return; i = BackwardWord(l, l->pos); linenoiseRingPush(l->buf + i, l->pos - i); memmove(l->buf + i, l->buf + l->pos, l->len - l->pos + 1); l->len -= l->pos - i; l->pos = i; linenoiseRefreshLine(l); } static void linenoiseEditXlatWord(struct linenoiseState *l, wint_t xlat(wint_t)) { int c; struct rune r; struct abuf ab; size_t i, j, p; abInit(&ab); i = Forwards(l, l->pos, iswseparator); for (j = i; j < l->len; j += r.n) { r = GetUtf8(l->buf + j, l->len - j); if (iswseparator(r.c)) break; if ((c = xlat(r.c)) != r.c) { abAppendw(&ab, _tpenc(c)); } else { /* avoid canonicalization */ abAppend(&ab, l->buf + j, r.n); } } if (ab.len && linenoiseGrow(l, i + ab.len + l->len - j + 1)) { l->pos = i + ab.len; abAppend(&ab, l->buf + j, l->len - j); l->len = i + ab.len; memcpy(l->buf + i, ab.b, ab.len + 1); linenoiseRefreshLine(l); } abFree(&ab); } static void linenoiseEditLowercaseWord(struct linenoiseState *l) { linenoiseEditXlatWord(l, towlower); } static void linenoiseEditUppercaseWord(struct linenoiseState *l) { linenoiseEditXlatWord(l, towupper); } static void linenoiseEditCapitalizeWord(struct linenoiseState *l) { iscapital = 0; linenoiseEditXlatWord(l, Capitalize); } static void linenoiseEditKillLeft(struct linenoiseState *l) { size_t diff, old_pos; linenoiseRingPush(l->buf, l->pos); old_pos = l->pos; l->pos = 0; diff = old_pos - l->pos; memmove(l->buf + l->pos, l->buf + old_pos, l->len - old_pos + 1); l->len -= diff; linenoiseRefreshLine(l); } static void linenoiseEditKillRight(struct linenoiseState *l) { linenoiseRingPush(l->buf + l->pos, l->len - l->pos); l->buf[l->pos] = '\0'; l->len = l->pos; linenoiseRefreshLine(l); } static void linenoiseEditYank(struct linenoiseState *l) { char *p; size_t n; if (!ring.p[ring.i]) return; n = strlen(ring.p[ring.i]); linenoiseGrow(l, l->len + n + 1); p = malloc(l->len - l->pos + 1); memcpy(p, l->buf + l->pos, l->len - l->pos + 1); memcpy(l->buf + l->pos, ring.p[ring.i], n); memcpy(l->buf + l->pos + n, p, l->len - l->pos + 1); free(p); l->yi = l->pos; l->yj = l->pos + n; l->pos += n; l->len += n; linenoiseRefreshLine(l); } static void linenoiseEditRotate(struct linenoiseState *l) { if ((l->seq[1][0] == CTRL('Y') || (l->seq[1][0] == '\e' && l->seq[1][1] == 'y'))) { if (l->yi < l->len && l->yj <= l->len) { memmove(l->buf + l->yi, l->buf + l->yj, l->len - l->yj + 1); l->len -= l->yj - l->yi; l->pos 
-= l->yj - l->yi; } linenoiseRingRotate(); linenoiseEditYank(l); } } static void linenoiseEditTranspose(struct linenoiseState *l) { char *q, *p; size_t a, b, c; b = l->pos; a = Backward(l, b); c = Forward(l, b); if (!(a < b && b < c)) return; p = q = malloc(c - a); p = mempcpy(p, l->buf + b, c - b); p = mempcpy(p, l->buf + a, b - a); _unassert(p - q == c - a); memcpy(l->buf + a, q, p - q); l->pos = c; free(q); linenoiseRefreshLine(l); } static void linenoiseEditTransposeWords(struct linenoiseState *l) { char *q, *p; struct rune r; size_t pi, xi, xj, yi, yj; pi = EscapeWord(l); xj = Backwards(l, pi, iswseparator); xi = Backwards(l, xj, notwseparator); yi = Forwards(l, pi, iswseparator); yj = Forwards(l, yi, notwseparator); if (!(xi < xj && xj < yi && yi < yj)) return; p = q = malloc(yj - xi); p = mempcpy(p, l->buf + yi, yj - yi); p = mempcpy(p, l->buf + xj, yi - xj); p = mempcpy(p, l->buf + xi, xj - xi); _unassert(p - q == yj - xi); memcpy(l->buf + xi, q, p - q); l->pos = yj; free(q); linenoiseRefreshLine(l); } static void linenoiseEditSqueeze(struct linenoiseState *l) { size_t i, j; i = Backwards(l, l->pos, iswseparator); j = Forwards(l, l->pos, iswseparator); if (!(i < j)) return; memmove(l->buf + i, l->buf + j, l->len - j + 1); l->len -= j - i; l->pos = i; linenoiseRefreshLine(l); } static void linenoiseEditMark(struct linenoiseState *l) { l->mark = l->pos; } static void linenoiseEditGoto(struct linenoiseState *l) { if (l->mark > l->len) return; l->pos = MIN(l->mark, l->len); linenoiseRefreshLine(l); } static size_t linenoiseEscape(char *d, const char *s, size_t n) { char *p; size_t i; unsigned c, w, l; for (p = d, l = i = 0; i < n; ++i) { switch ((c = s[i] & 255)) { CASE('\e', w = READ16LE("\\e")); CASE('\a', w = READ16LE("\\a")); CASE('\b', w = READ16LE("\\b")); CASE('\t', w = READ16LE("\\t")); CASE('\n', w = READ16LE("\\n")); CASE('\v', w = READ16LE("\\v")); CASE('\f', w = READ16LE("\\f")); CASE('\r', w = READ16LE("\\r")); CASE('"', w = READ16LE("\\\"")); CASE('\'', w = READ16LE("\\\'")); CASE('\\', w = READ16LE("\\\\")); default: if ((0x00 <= c && c <= 0x1F) || c == 0x7F || (c == '?' && l == '?')) { w = READ16LE("\\x"); w |= "0123456789abcdef"[(c & 0xF0) >> 4] << 020; w |= "0123456789abcdef"[(c & 0x0F) >> 0] << 030; } else { w = c; } break; } WRITE32LE(p, w); p += (_bsr(w) >> 3) + 1; l = w; } return p - d; } static void linenoiseEditInterrupt(struct linenoiseState *l) { gotint = SIGINT; } static void linenoiseEditQuit(struct linenoiseState *l) { gotint = SIGQUIT; } static void linenoiseEditSuspend(struct linenoiseState *l) { raise(SIGSTOP); } static void linenoiseEditPause(struct linenoiseState *l) { tcflow(l->ofd, TCOOFF); ispaused = 1; } static void linenoiseEditCtrlq(struct linenoiseState *l) { } /** * Moves last item inside current s-expression to outside, e.g. * * (a| b c) * (a| b) c * * The cursor position changes only if a paren is moved before it: * * (a b c |) * (a b) c | * * To accommodate non-LISP languages we connect unspaced outer symbols: * * f(a,| b, g()) * f(a,| b), g() * * Our standard keybinding is ALT-SHIFT-B. 
*/ static void linenoiseEditBarf(struct linenoiseState *l) { struct rune r; unsigned long w; size_t i, pos, depth = 0; unsigned lhs, rhs, end, *stack = 0; /* go as far right within current s-expr as possible */ for (pos = l->pos;; pos += r.n) { if (pos == l->len) goto Finish; r = GetUtf8(l->buf + pos, l->len - pos); if (depth) { if (r.c == stack[depth - 1]) { --depth; } } else { if ((rhs = GetMirrorRight(r.c))) { stack = (unsigned *)realloc(stack, ++depth * sizeof(*stack)); stack[depth - 1] = rhs; } else if (GetMirrorLeft(r.c)) { end = pos; break; } } } /* go back one item */ pos = Backwards(l, pos, isxseparator); for (;; pos = i) { if (!pos) goto Finish; i = Backward(l, pos); r = GetUtf8(l->buf + i, l->len - i); if (depth) { if (r.c == stack[depth - 1]) { --depth; } } else { if ((lhs = GetMirrorLeft(r.c))) { stack = (unsigned *)realloc(stack, ++depth * sizeof(*stack)); stack[depth - 1] = lhs; } else if (iswseparator(r.c)) { break; } } } pos = Backwards(l, pos, isxseparator); /* now move the text */ r = GetUtf8(l->buf + end, l->len - end); memmove(l->buf + pos + r.n, l->buf + pos, end - pos); w = _tpenc(r.c); for (i = 0; i < r.n; ++i) { l->buf[pos + i] = w; w >>= 8; } if (l->pos > pos) { l->pos += r.n; } linenoiseRefreshLine(l); Finish: free(stack); } /** * Moves first item outside current s-expression to inside, e.g. * * (a| b) c d * (a| b c) d * * To accommodate non-LISP languages we connect unspaced outer symbols: * * f(a,| b), g() * f(a,| b, g()) * * Our standard keybinding is ALT-SHIFT-S. */ static void linenoiseEditSlurp(struct linenoiseState *l) { char rp[6]; struct rune r; size_t pos, depth = 0; unsigned rhs, point = 0, start = 0, *stack = 0; /* go to outside edge of current s-expr */ for (pos = l->pos; pos < l->len; pos += r.n) { r = GetUtf8(l->buf + pos, l->len - pos); if (depth) { if (r.c == stack[depth - 1]) { --depth; } } else { if ((rhs = GetMirrorRight(r.c))) { stack = (unsigned *)realloc(stack, ++depth * sizeof(*stack)); stack[depth - 1] = rhs; } else if (GetMirrorLeft(r.c)) { point = pos; pos += r.n; start = pos; break; } } } /* go forward one item */ pos = Forwards(l, pos, isxseparator); for (; pos < l->len; pos += r.n) { r = GetUtf8(l->buf + pos, l->len - pos); if (depth) { if (r.c == stack[depth - 1]) { --depth; } } else { if ((rhs = GetMirrorRight(r.c))) { stack = (unsigned *)realloc(stack, ++depth * sizeof(*stack)); stack[depth - 1] = rhs; } else if (iswseparator(r.c)) { break; } } } /* now move the text */ memcpy(rp, l->buf + point, start - point); memmove(l->buf + point, l->buf + start, pos - start); memcpy(l->buf + pos - (start - point), rp, start - point); linenoiseRefreshLine(l); free(stack); } struct linenoiseState *linenoiseBegin(const char *prompt, int ifd, int ofd) { struct linenoiseState *l; if (!(l = calloc(1, sizeof(*l)))) { return 0; } if (!(l->buf = malloc((l->buflen = 32)))) { free(l); return 0; } l->state = DUFF_ROUTINE_START; l->buf[0] = 0; l->ifd = ifd; l->ofd = ofd; l->prompt = strdup(prompt ? 
prompt : ""); l->ws = linenoiseGetTerminalSize(l->ws, l->ifd, l->ofd); linenoiseWriteStr(l->ofd, l->prompt); abInit(&l->ab); return l; } void linenoiseReset(struct linenoiseState *l) { l->buf[0] = 0; l->dirty = true; l->final = 0; l->hindex = 0; l->len = 0; l->mark = 0; l->oldpos = 0; l->pos = 0; l->yi = 0; l->yj = 0; } void linenoiseEnd(struct linenoiseState *l) { if (l) { linenoiseFreeCompletions(&l->lc); abFree(&l->ab); free(l->oldprompt); free(l->prompt); free(l->buf); free(l); } } static int CompareStrings(const void *a, const void *b) { return strcmp(*(const char **)a, *(const char **)b); } /** * Runs linenoise engine. * * This function is the core of the line editing capability of linenoise. * It expects 'fd' to be already in "raw mode" so that every key pressed * will be returned ASAP to read(). The exit conditions are: * * 1. ret > 0 / buf ≠ 0 / errno = ? -- means we got some * 2. ret = 0 / buf ≠ 0 / errno = ? -- means empty line * 3. ret = 0 / buf = 0 / errno = ? -- means eof * 4. ret = -1 / buf = ? / errno ≠ 0 -- means error * * @param l is linenoise reader object created by linenoiseBegin() * @param prompt if non-null is copied and replaces current prompt * @param block if false will cause -1 / EAGAIN if there's no data * @return chomped character count in buf >=0 or -1 on eof / error */ ssize_t linenoiseEdit(struct linenoiseState *l, const char *prompt, char **obuf, bool block) { ssize_t rc; char seq[16]; gotint = 0; if (prompt && l->state != DUFF_ROUTINE_SEARCH && (!l->prompt || strcmp(prompt, l->prompt))) { free(l->prompt); l->prompt = strdup(prompt); } switch (l->state) { for (;;) { DUFF_ROUTINE_READ(DUFF_ROUTINE_LOOP); HandleRead: if (!rc && l->len) { rc = 1; seq[0] = '\r'; seq[1] = 0; } else if (!rc || rc == -1) { free(history[--historylen]); history[historylen] = 0; linenoiseReset(l); if (!rc) *obuf = 0; return rc; } // handle reverse history search if (seq[0] == CTRL('R')) { int fail, added, oldpos; if (historylen <= 1) continue; l->ab.len = 0; l->olderpos = l->pos; l->oldprompt = l->prompt; l->oldindex = l->hindex; l->prompt = 0; for (fail = l->matlen = 0;;) { free(l->prompt); l->prompt = linenoiseMakeSearchPrompt(fail, l->ab.b, l->matlen); DUFF_ROUTINE_READ(DUFF_ROUTINE_SEARCH); fail = 1; added = 0; l->j = l->pos; l->i = l->hindex; if (rc > 0) { if (seq[0] == CTRL('?') || seq[0] == CTRL('H')) { if (l->ab.len) { --l->ab.len; l->matlen = MIN(l->matlen, l->ab.len); } } else if (seq[0] == CTRL('R')) { if (l->j) { --l->j; } else if (l->i + 1 < historylen) { ++l->i; l->j = strlen(history[historylen - 1 - l->i]); } } else if (seq[0] == CTRL('G')) { linenoiseEditHistoryGoto(l, l->oldindex); l->pos = l->olderpos; break; } else if (iswcntrl(seq[0])) { // only sees canonical c0 break; } else { abAppend(&l->ab, seq, rc); added = rc; } } else { break; } while (l->i < historylen) { int k; char *p; const char *q; p = history[historylen - 1 - l->i]; k = strlen(p); l->j = l->j >= 0 ? 
MIN(k, l->j + l->ab.len) : k; if ((q = FindSubstringReverse(p, l->j, l->ab.b, l->ab.len))) { linenoiseEditHistoryGoto(l, l->i); l->pos = q - p; fail = 0; if (added) { l->matlen += added; added = 0; } break; } else { l->i = l->i + 1; l->j = -1; } } } free(l->prompt); l->prompt = l->oldprompt; l->oldprompt = 0; linenoiseRefreshLine(l); goto HandleRead; } // handle tab and tab-tab completion if (seq[0] == '\t' && completionCallback) { size_t i, n, m; // we know that the user pressed tab once rc = 0; linenoiseFreeCompletions(&l->lc); i = Backwards(l, l->pos, iswname); { char *s = strndup(l->buf + i, l->pos - i); completionCallback(s, &l->lc); free(s); } m = GetCommonPrefixLength(&l->lc); if (m > l->pos - i || (m == l->pos - i && l->lc.len == 1)) { // on common prefix (or single completion) we complete and return n = i + m + (l->len - l->pos); if (linenoiseGrow(l, n + 1)) { memmove(l->buf + i + m, l->buf + l->pos, l->len - l->pos + 1); memcpy(l->buf + i, l->lc.cvec[0], m); l->pos = i + m; l->len = n; } continue; } if (l->lc.len > 1) { qsort(l->lc.cvec, l->lc.len, sizeof(*l->lc.cvec), CompareStrings); // if there are multiple completions, then do nothing and wait to // see if the user presses tab again. if the user does, we then // print ALL the completions above the editing line for (i = 0; i < l->lc.len; ++i) { char *s = l->lc.cvec[i]; l->lc.cvec[i] = VisualizeControlCodes(s, -1, 0); free(s); } for (;;) { DUFF_ROUTINE_READ(2); if (rc == 1 && seq[0] == '\t') { const char **p; struct abuf ab; int i, k, x, y, xn, yn, xy, itemlen; itemlen = linenoiseMaxCompletionWidth(&l->lc) + 4; xn = MAX(1, (l->ws.ws_col - 1) / itemlen); yn = (l->lc.len + (xn - 1)) / xn; if (!__builtin_mul_overflow(xn, yn, &xy) && (p = calloc(xy, sizeof(char *)))) { // arrange in column major order for (i = x = 0; x < xn; ++x) { for (y = 0; y < yn; ++y) { p[y * xn + x] = i < l->lc.len ?
l->lc.cvec[i++] : ""; } } abInit(&ab); abAppends(&ab, "\r\n\e[K"); for (x = i = 0; i < xy; ++i) { n = GetMonospaceWidth(p[i], strlen(p[i]), 0); abAppends(&ab, p[i]); for (k = n; k < itemlen; ++k) { abAppendw(&ab, ' '); } if (++x == xn) { abAppendw(&ab, READ16LE("\r\n")); x = 0; } } ab.len -= 2; abAppends(&ab, "\n"); linenoiseWriteStr(l->ofd, ab.b); linenoiseRefreshLine(l); abFree(&ab); free(p); } } else { goto HandleRead; } } } } // handle (1) emacs keyboard combos // (2) otherwise sigint exit if (seq[0] == CTRL('C')) { DUFF_ROUTINE_READ(3); if (rc == 1) { switch (seq[0]) { CASE(CTRL('C'), linenoiseEditInterrupt(l)); CASE(CTRL('B'), linenoiseEditBarf(l)); CASE(CTRL('S'), linenoiseEditSlurp(l)); default: goto HandleRead; } continue; } else { goto HandleRead; } } // handle (1) unpausing terminal after ctrl-s // (2) otherwise raw keystroke inserts if (seq[0] == CTRL('Q')) { if (ispaused) { linenoiseUnpause(l->ofd); } else { DUFF_ROUTINE_READ(4); if (rc > 0) { char esc[sizeof(seq) * 4]; size_t m = linenoiseEscape(esc, seq, rc); linenoiseEditInsert(l, esc, m); } else { goto HandleRead; } } continue; } // handle enter key if (seq[0] == '\r') { char *p; l->final = 1; free(history[--historylen]); history[historylen] = 0; linenoiseEditEnd(l); linenoiseRefreshLineForce(l); p = strdup(l->buf); linenoiseReset(l); if (p) { *obuf = p; l->state = DUFF_ROUTINE_START; return l->len; } else { return -1; } DUFF_ROUTINE_LABEL(DUFF_ROUTINE_START); linenoiseHistoryAdd(""); continue; } // handle keystrokes that don't need read() switch (seq[0]) { CASE(CTRL('P'), linenoiseEditUp(l)); CASE(CTRL('E'), linenoiseEditEnd(l)); CASE(CTRL('N'), linenoiseEditDown(l)); CASE(CTRL('A'), linenoiseEditHome(l)); CASE(CTRL('B'), linenoiseEditLeft(l)); CASE(CTRL('@'), linenoiseEditMark(l)); CASE(CTRL('Y'), linenoiseEditYank(l)); CASE(CTRL('F'), linenoiseEditRight(l)); CASE(CTRL('\\'), linenoiseEditQuit(l)); CASE(CTRL('S'), linenoiseEditPause(l)); CASE(CTRL('?'), linenoiseEditRubout(l)); CASE(CTRL('H'), linenoiseEditRubout(l)); CASE(CTRL('L'), linenoiseEditRefresh(l)); CASE(CTRL('Z'), linenoiseEditSuspend(l)); CASE(CTRL('U'), linenoiseEditKillLeft(l)); CASE(CTRL('T'), linenoiseEditTranspose(l)); CASE(CTRL('K'), linenoiseEditKillRight(l)); CASE(CTRL('W'), linenoiseEditRuboutWord(l)); case CTRL('X'): if (l->seq[1][0] == CTRL('X')) { linenoiseEditGoto(l); } break; case CTRL('D'): if (l->len) { linenoiseEditDelete(l); } else { free(history[--historylen]); history[historylen] = 0; linenoiseReset(l); *obuf = 0; return 0; } break; case '\e': // handle ansi escape if (rc < 2) break; switch (seq[1]) { CASE('<', linenoiseEditBof(l)); CASE('>', linenoiseEditEof(l)); CASE('y', linenoiseEditRotate(l)); CASE('\\', linenoiseEditSqueeze(l)); CASE('b', linenoiseEditLeftWord(l)); CASE('f', linenoiseEditRightWord(l)); CASE('h', linenoiseEditRuboutWord(l)); CASE('d', linenoiseEditDeleteWord(l)); CASE('l', linenoiseEditLowercaseWord(l)); CASE('u', linenoiseEditUppercaseWord(l)); CASE('c', linenoiseEditCapitalizeWord(l)); CASE('t', linenoiseEditTransposeWords(l)); CASE(CTRL('B'), linenoiseEditLeftExpr(l)); CASE(CTRL('F'), linenoiseEditRightExpr(l)); CASE(CTRL('H'), linenoiseEditRuboutWord(l)); case '[': // handle ansi csi sequences if (rc < 3) break; if (seq[2] >= '0' && seq[2] <= '9') { if (rc < 4) break; if (seq[3] == '~') { switch (seq[2]) { CASE('1', linenoiseEditHome(l)); // \e[1~ CASE('3', linenoiseEditDelete(l)); // \e[3~ CASE('4', linenoiseEditEnd(l)); // \e[4~ default: break; } } else if (rc == 6 && seq[2] == '1' && seq[3] == ';' && seq[4] == 
'5') { switch (seq[5]) { CASE('C', linenoiseEditRightWord(l)); // \e[1;5C ctrl-right CASE('D', linenoiseEditLeftWord(l)); // \e[1;5D ctrl-left default: break; } } } else { switch (seq[2]) { CASE('A', linenoiseEditUp(l)); CASE('B', linenoiseEditDown(l)); CASE('C', linenoiseEditRight(l)); CASE('D', linenoiseEditLeft(l)); CASE('H', linenoiseEditHome(l)); CASE('F', linenoiseEditEnd(l)); default: break; } } break; case 'O': if (rc < 3) break; switch (seq[2]) { CASE('A', linenoiseEditUp(l)); CASE('B', linenoiseEditDown(l)); CASE('C', linenoiseEditRight(l)); CASE('D', linenoiseEditLeft(l)); CASE('H', linenoiseEditHome(l)); CASE('F', linenoiseEditEnd(l)); default: break; } break; case '\e': if (rc < 3) break; switch (seq[2]) { case '[': if (rc < 4) break; switch (seq[3]) { CASE('C', linenoiseEditRightExpr(l)); // \e\e[C alt-right CASE('D', linenoiseEditLeftExpr(l)); // \e\e[D alt-left default: break; } break; case 'O': if (rc < 4) break; switch (seq[3]) { CASE('C', linenoiseEditRightExpr(l)); // \e\eOC alt-right CASE('D', linenoiseEditLeftExpr(l)); // \e\eOD alt-left default: break; } break; default: break; } break; default: break; } break; default: // handle normal keystrokes if (!iswcntrl(seq[0])) { // only sees canonical c0 if (xlatCallback) { uint64_t w; struct rune rune; rune = GetUtf8(seq, rc); w = _tpenc(xlatCallback(rune.c)); rc = 0; do { seq[rc++] = w; } while ((w >>= 8)); } linenoiseEditInsert(l, seq, rc); } break; } } default: unreachable; } } void linenoiseFree(void *ptr) { free(ptr); } void linenoiseHistoryFree(void) { size_t i; for (i = 0; i < LINENOISE_MAX_HISTORY; i++) { if (history[i]) { free(history[i]); history[i] = 0; } } historylen = 0; } int linenoiseHistoryAdd(const char *line) { char *linecopy; if (!LINENOISE_MAX_HISTORY) return 0; if (historylen && !strcmp(history[historylen - 1], line)) return 0; if (!(linecopy = strdup(line))) return 0; if (historylen == LINENOISE_MAX_HISTORY) { free(history[0]); memmove(history, history + 1, sizeof(char *) * (LINENOISE_MAX_HISTORY - 1)); historylen--; } history[historylen++] = linecopy; return 1; } /** * Saves line editing history to file. * * @return 0 on success, or -1 w/ errno */ int linenoiseHistorySave(const char *filename) { int j; FILE *fp; mode_t old_umask; if (filename) { old_umask = umask(S_IXUSR | S_IRWXG | S_IRWXO); fp = fopen(filename, "w"); umask(old_umask); if (!fp) return -1; chmod(filename, S_IRUSR | S_IWUSR); for (j = 0; j < historylen; j++) { fputs(history[j], fp); fputc('\n', fp); } fclose(fp); } return 0; } /** * Loads history from the specified file. * * If the file doesn't exist, zero is returned and this will do nothing. If the file does exist and the operation succeeds, zero is returned; otherwise, on error, -1 is returned. 
* * @return 0 on success, or -1 w/ errno */ int linenoiseHistoryLoad(const char *filename) { char **h; int rc, fd, err; size_t i, j, k, n, t; char *m, *e, *p, *q, *f, *s; err = errno, rc = 0; if (!LINENOISE_MAX_HISTORY) return 0; if (!(h = (char **)calloc(2 * LINENOISE_MAX_HISTORY, sizeof(char *)))) return -1; if ((fd = open(filename, O_RDONLY)) != -1) { if ((n = GetFdSize(fd))) { if ((m = (char *)mmap(0, n, PROT_READ, MAP_SHARED, fd, 0)) != MAP_FAILED) { for (i = 0, e = (p = m) + n; p < e; p = f + 1) { if (!(q = (char *)memchr(p, '\n', e - p))) q = e; for (f = q; q > p; --q) { if (q[-1] != '\n' && q[-1] != '\r') break; } if (q > p) { h[i * 2 + 0] = p; h[i * 2 + 1] = q; i = (i + 1) % LINENOISE_MAX_HISTORY; } } linenoiseHistoryFree(); for (j = 0; j < LINENOISE_MAX_HISTORY; ++j) { if (h[(k = (i + j) % LINENOISE_MAX_HISTORY) * 2]) { if ((s = malloc((t = h[k * 2 + 1] - h[k * 2]) + 1))) { memcpy(s, h[k * 2], t), s[t] = 0; history[historylen++] = s; } } } munmap(m, n); } else { rc = -1; } } close(fd); } else if (errno == ENOENT) { errno = err; } else { rc = -1; } free(h); return rc; } /** * Returns appropriate system config location. * @return path needing free or null if prog is null */ char *linenoiseGetHistoryPath(const char *prog) { struct abuf path; const char *a, *b; if (!prog) return 0; if (strchr(prog, '/') || strchr(prog, '.')) return strdup(prog); abInit(&path); b = ""; if (!(a = getenv("HOME"))) { if (!(a = getenv("HOMEDRIVE")) || !(b = getenv("HOMEPATH"))) { a = ""; } } if (*a) { abAppends(&path, a); abAppends(&path, b); if (!_endswith(path.b, "/") && !_endswith(path.b, "\\")) { abAppendw(&path, '/'); } } abAppendw(&path, '.'); abAppends(&path, prog); abAppends(&path, "_history"); return path.b; } /** * Reads line interactively. * * This function can be used instead of linenoise() in cases where we * know for certain we're dealing with a terminal, which means we can * avoid linking any stdio code. * * @return chomped allocated string of read line or null on eof/error */ char *linenoiseRaw(const char *prompt, int infd, int outfd) { char *buf; ssize_t rc; struct sigaction sa[3]; struct linenoiseState *l; if (linenoiseEnableRawMode(infd) == -1) return 0; sigemptyset(&sa->sa_mask); sa->sa_flags = SA_NODEFER; sa->sa_handler = linenoiseOnInt; sigaction(SIGINT, sa, sa + 1); sigaction(SIGQUIT, sa, sa + 2); l = linenoiseBegin(prompt, infd, outfd); rc = linenoiseEdit(l, 0, &buf, true); linenoiseEnd(l); linenoiseDisableRawMode(); sigaction(SIGQUIT, sa + 2, 0); sigaction(SIGINT, sa + 1, 0); if (gotint) { if (rc != -1) { free(buf); } raise(gotint); errno = EINTR; gotint = 0; rc = -1; } if (rc != -1) { if (buf) { linenoiseWriteStr(outfd, "\n"); } return buf; } else { return 0; } } static int linenoiseFallback(const char *prompt, char **res) { if (prompt && *prompt && (strchr(prompt, '\n') || strchr(prompt, '\t') || strchr(prompt + 1, '\r'))) { errno = EINVAL; *res = 0; return 1; } if (!linenoiseIsTerminal()) { if (prompt && *prompt && linenoiseIsTeletype()) { fputs(prompt, stdout); fflush(stdout); } *res = linenoiseGetLine(stdin); return 1; } else { return 0; } } /** * Reads line intelligently. * * The high level function that is the main API of the linenoise library. * This function checks if the terminal has basic capabilities, just checking * for a blacklist of inarticulate terminals, and later either calls the line * editing function or uses dummy fgets() so that you will be able to type * something even in the most desperate of the conditions. 
* * @param prompt is printed before asking for input if we have a term * and this may be set to empty or null to disable and prompt may * contain ansi escape sequences, color, utf8, etc. * @return chomped allocated string of read line or null on eof/error */ char *linenoise(const char *prompt) { char *res; bool rm, rs; if (linenoiseFallback(prompt, &res)) return res; fflush(stdout); rm = __replmode; rs = __replstderr; __replmode = true; if (isatty(2)) __replstderr = true; res = linenoiseRaw(prompt, fileno(stdin), fileno(stdout)); __replstderr = rs; __replmode = rm; return res; } /** * Reads line intelligently w/ history, e.g. * * // see ~/.foo_history * main() { * char *line; * while ((line = linenoiseWithHistory("IN> ", "foo"))) { * printf("OUT> %s\n", line); * free(line); * } * } * * @param prompt is printed before asking for input if we have a term * and this may be set to empty or null to disable and prompt may * contain ansi escape sequences, color, utf8, etc. * @param prog is the name of your app, used to generate history filename * however if it contains a slash / dot then we'll assume prog is * the history filename as determined by the caller * @return chomped allocated string of read line or null on eof/error * noting that on eof your errno is not changed */ char *linenoiseWithHistory(const char *prompt, const char *prog) { char *path, *line, *res; if (linenoiseFallback(prompt, &res)) return res; fflush(stdout); if ((path = linenoiseGetHistoryPath(prog))) { if (linenoiseHistoryLoad(path) == -1) { fprintf(stderr, "%s: failed to load history: %m\n", path); free(path); path = 0; } } line = linenoise(prompt); if (path && line && *line) { /* history here is inefficient but helpful when the user has multiple * repls open at the same time, so history propagates between them */ linenoiseHistoryLoad(path); linenoiseHistoryAdd(line); linenoiseHistorySave(path); } free(path); return line; } /** * Returns 0, otherwise SIGINT or SIGQUIT if an interrupt was received. */ int linenoiseGetInterrupt(void) { return gotint; } /** * Registers tab completion callback. */ void linenoiseSetCompletionCallback(linenoiseCompletionCallback *fn) { completionCallback = fn; } /** * Registers hints callback. * * Register a hints function to be called to show hints to the user at the * right of the prompt. */ void linenoiseSetHintsCallback(linenoiseHintsCallback *fn) { hintsCallback = fn; } /** * Sets free hints callback. * * This registers a function to free the hints returned by the hints * callback registered with linenoiseSetHintsCallback(). */ void linenoiseSetFreeHintsCallback(linenoiseFreeHintsCallback *fn) { freeHintsCallback = fn; } /** * Sets character translation callback. */ void linenoiseSetXlatCallback(linenoiseXlatCallback *fn) { xlatCallback = fn; } /** * Adds completion. * * This function is used by the callback function registered by the user * in order to add completion options given the input string when the * user typed <tab>. See the example.c source code for a very easy to * understand example. */ void linenoiseAddCompletion(linenoiseCompletions *lc, const char *str) { size_t len; char *copy, **cvec; if ((copy = malloc((len = strlen(str)) + 1))) { memcpy(copy, str, len + 1); if ((cvec = realloc(lc->cvec, (lc->len + 1) * sizeof(*lc->cvec)))) { lc->cvec = cvec; lc->cvec[lc->len++] = copy; } else { free(copy); } } } /** * Frees the list of completion options populated by linenoiseAddCompletion(). 
*/ void linenoiseFreeCompletions(linenoiseCompletions *lc) { size_t i; if (lc->cvec) { for (i = 0; i < lc->len; i++) { free(lc->cvec[i]); } free(lc->cvec); } lc->cvec = 0; lc->len = 0; } /** * Enables "mask mode". * * When it is enabled, instead of the input that the user is typing, the * terminal will just display a corresponding number of asterisks, like * "****". This is useful for passwords and other secrets that should * not be displayed. * * @see linenoiseMaskModeDisable() */ void linenoiseMaskModeEnable(void) { maskmode = 1; } /** * Disables "mask mode". */ void linenoiseMaskModeDisable(void) { maskmode = 0; } static void linenoiseAtExit(void) { linenoiseDisableRawMode(); linenoiseHistoryFree(); linenoiseRingFree(); } static textstartup void linenoiseInit() { atexit(linenoiseAtExit); } const void *const linenoiseCtor[] initarray = { linenoiseInit, };
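/*
 * Editor's sketch (not part of the original file): minimal use of the
 * public API defined above. It assumes the declarations live in
 * "third_party/linenoise/linenoise.h" and that the completion callback
 * takes (const char *, linenoiseCompletions *), matching how
 * completionCallback is invoked from linenoiseEdit() above.
 */
#include "libc/mem/mem.h"
#include "libc/stdio/stdio.h"
#include "third_party/linenoise/linenoise.h"

static void AddCompletions(const char *s, linenoiseCompletions *lc) {
  /* offer two canned completions for words starting with 'h' */
  if (s[0] == 'h') {
    linenoiseAddCompletion(lc, "help");
    linenoiseAddCompletion(lc, "history");
  }
}

int main(void) {
  char *line;
  linenoiseSetCompletionCallback(AddCompletions);
  /* loops until eof (ctrl-d) or error; history goes in ~/.demo_history */
  while ((line = linenoiseWithHistory("demo> ", "demo"))) {
    printf("you said: %s\n", line);
    free(line);  /* returned strings are heap allocated */
  }
}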
79,975
2,658
jart/cosmopolitan
false
cosmopolitan/third_party/getopt/README.txt
GETOPT(3) Cosmopolitan Library Functions Manual GETOPT(3) 𝐍𝐀𝐌𝐄 𝗴𝗲𝘁𝗼𝗽𝘁 — get option character from command line argument list 𝐒𝐘𝐍𝐎𝐏𝐒𝐈𝐒 #𝗶𝗻𝗰𝗹𝘂𝗱𝗲 <𝘂𝗻𝗶𝘀𝘁𝗱.𝗵> e̲x̲t̲e̲r̲n̲ c̲h̲a̲r̲ *̲o̲p̲t̲a̲r̲g̲;̲ e̲x̲t̲e̲r̲n̲ i̲n̲t̲ o̲p̲t̲e̲r̲r̲;̲ e̲x̲t̲e̲r̲n̲ i̲n̲t̲ o̲p̲t̲i̲n̲d̲;̲ e̲x̲t̲e̲r̲n̲ i̲n̲t̲ o̲p̲t̲o̲p̲t̲;̲ e̲x̲t̲e̲r̲n̲ i̲n̲t̲ o̲p̲t̲r̲e̲s̲e̲t̲;̲ i̲n̲t̲ 𝗴𝗲𝘁𝗼𝗽𝘁(i̲n̲t̲ a̲r̲g̲c̲, c̲h̲a̲r̲ *̲ c̲o̲n̲s̲t̲ *̲a̲r̲g̲v̲, c̲o̲n̲s̲t̲ c̲h̲a̲r̲ *̲o̲p̲t̲s̲t̲r̲i̲n̲g̲); 𝐃𝐄𝐒𝐂𝐑𝐈𝐏𝐓𝐈𝐎𝐍 The 𝗴𝗲𝘁𝗼𝗽𝘁() function incrementally parses a command line argument list a̲r̲g̲v̲ and returns the next k̲n̲o̲w̲n̲ option character. An option character is k̲n̲o̲w̲n̲ if it has been specified in the string of accepted option characters, o̲p̲t̲s̲t̲r̲i̲n̲g̲. The option string o̲p̲t̲s̲t̲r̲i̲n̲g̲ may contain the following elements: individual characters, characters followed by a colon, and charac‐ ters followed by two colons. A character followed by a single colon indicates that an argument is to follow the option on the command line. Two colons indicates that the argument is optional - this is an extension not covered by POSIX. For example, an option string "x" recognizes an option -𝘅, and an option string "x:" rec‐ ognizes an option and argument -𝘅 a̲r̲g̲u̲m̲e̲n̲t̲. It does not matter to 𝗴𝗲𝘁𝗼𝗽𝘁() if a following argument has leading whitespace; except in the case where the argument is optional, denoted with two colons, no leading whitespace is permitted. On return from 𝗴𝗲𝘁𝗼𝗽𝘁(), o̲p̲t̲a̲r̲g̲ points to an option argument, if it is anticipated, and the variable o̲p̲t̲i̲n̲d̲ contains the index to the next a̲r̲g̲v̲ argument for a subsequent call to 𝗴𝗲𝘁𝗼𝗽𝘁(). The variables o̲p̲t̲e̲r̲r̲ and o̲p̲t̲i̲n̲d̲ are both initialized to 1. The o̲p̲t̲i̲n̲d̲ variable may be set to another value larger than 0 before a set of calls to 𝗴𝗲𝘁𝗼𝗽𝘁() in order to skip over more or less a̲r̲g̲v̲ entries. An o̲p̲t̲i̲n̲d̲ value of 0 is reserved for compatibility with GNU 𝗴𝗲𝘁𝗼𝗽𝘁(). In order to use 𝗴𝗲𝘁𝗼𝗽𝘁() to evaluate multiple sets of arguments, or to evaluate a single set of arguments multiple times, the variable o̲p̲t̲r̲e̲s̲e̲t̲ must be set to 1 before the second and each additional set of calls to 𝗴𝗲𝘁𝗼𝗽𝘁(), and the variable o̲p̲t̲i̲n̲d̲ must be reinitial‐ ized. The 𝗴𝗲𝘁𝗼𝗽𝘁() function returns -1 when the argument list is exhausted. The interpretation of options in the argument list may be cancelled by the option ‘--’ (double dash) which causes 𝗴𝗲𝘁𝗼𝗽𝘁() to signal the end of argument processing and return -1. When all options have been processed (i.e., up to the first non-option argu‐ ment), 𝗴𝗲𝘁𝗼𝗽𝘁() returns -1. 𝐑𝐄𝐓𝐔𝐑𝐍 𝐕𝐀𝐋𝐔𝐄𝐒 The 𝗴𝗲𝘁𝗼𝗽𝘁() function returns the next known option character in o̲p̲t̲s̲t̲r̲i̲n̲g̲. If 𝗴𝗲𝘁𝗼𝗽𝘁() encounters a character not found in o̲p̲t̲s̲t̲r̲i̲n̲g̲ or if it detects a missing option argument, it returns ‘?’ (question mark). If o̲p̲t̲s̲t̲r̲i̲n̲g̲ has a leading ‘:’ then a missing option argument causes ‘:’ to be returned instead of ‘?’. In either case, the variable o̲p̲t̲o̲p̲t̲ is set to the character that caused the error. The 𝗴𝗲𝘁𝗼𝗽𝘁() function returns -1 when the argu‐ ment list is exhausted. 𝐄𝐗𝐀𝐌𝐏𝐋𝐄𝐒 The following code accepts the options -𝗯 and -𝗳 a̲r̲g̲u̲m̲e̲n̲t̲ and adjusts a̲r̲g̲c̲ and a̲r̲g̲v̲ after option argument processing has com‐ pleted. 
int bflag, ch, fd; bflag = 0; while ((ch = getopt(argc, argv, "bf:")) != -1) { switch (ch) { case 'b': bflag = 1; break; case 'f': if ((fd = open(optarg, O_RDONLY, 0)) == -1) err(1, "%s", optarg); break; default: usage(); } } argc -= optind; argv += optind; 𝐃𝐈𝐀𝐆𝐍𝐎𝐒𝐓𝐈𝐂𝐒 If the 𝗴𝗲𝘁𝗼𝗽𝘁() function encounters a character not found in the string o̲p̲t̲s̲t̲r̲i̲n̲g̲ or detects a missing option argument, it writes an error message to s̲t̲d̲e̲r̲r̲ and returns ‘?’. Setting o̲p̲t̲e̲r̲r̲ to a zero will disable these error messages. If o̲p̲t̲s̲t̲r̲i̲n̲g̲ has a leading ‘:’ then a missing option argument causes a ‘:’ to be returned in addi‐ tion to suppressing any error messages. Option arguments are allowed to begin with ‘-’; this is reasonable but reduces the amount of error checking possible. 𝐒𝐄𝐄 𝐀𝐋𝐒𝐎 getopt(1), getopt_long(3), getsubopt(3) 𝐒𝐓𝐀𝐍𝐃𝐀𝐑𝐃𝐒 The 𝗴𝗲𝘁𝗼𝗽𝘁() function implements a superset of the functionality specified by IEEE Std 1003.1 (“POSIX.1”). The following extensions are supported: · The o̲p̲t̲r̲e̲s̲e̲t̲ variable was added to make it possible to call the 𝗴𝗲𝘁𝗼𝗽𝘁() function multiple times. · If the o̲p̲t̲i̲n̲d̲ variable is set to 0, 𝗴𝗲𝘁𝗼𝗽𝘁() will behave as if the o̲p̲t̲r̲e̲s̲e̲t̲ variable has been set. This is for compatibility with GNU 𝗴𝗲𝘁𝗼𝗽𝘁(). New code should use o̲p̲t̲r̲e̲s̲e̲t̲ instead. · If the first character of o̲p̲t̲s̲t̲r̲i̲n̲g̲ is a plus sign (‘+’), it will be ignored. This is for compatibility with GNU 𝗴𝗲𝘁𝗼𝗽𝘁(). · If the first character of o̲p̲t̲s̲t̲r̲i̲n̲g̲ is a dash (‘-’), non- options will be returned as arguments to the option character ‘\1’. This is for compatibility with GNU 𝗴𝗲𝘁𝗼𝗽𝘁(). · A single dash (‘-’) may be specified as a character in o̲p̲t̲s̲t̲r̲i̲n̲g̲, however it should n̲e̲v̲e̲r̲ have an argument associated with it. This allows 𝗴𝗲𝘁𝗼𝗽𝘁() to be used with programs that expect ‘-’ as an option flag. This practice is wrong, and should not be used in any current development. It is provided for backward compatibility o̲n̲l̲y̲. Care should be taken not to use ‘-’ as the first character in o̲p̲t̲s̲t̲r̲i̲n̲g̲ to avoid a semantic conflict with GNU 𝗴𝗲𝘁𝗼𝗽𝘁() semantics (see above). By default, a single dash causes 𝗴𝗲𝘁𝗼𝗽𝘁() to return -1. Historic BSD versions of 𝗴𝗲𝘁𝗼𝗽𝘁() set o̲p̲t̲o̲p̲t̲ to the last option character processed. However, this conflicts with IEEE Std 1003.1 (“POSIX.1”) which stipulates that o̲p̲t̲o̲p̲t̲ be set to the last charac‐ ter that caused an error. 𝐇𝐈𝐒𝐓𝐎𝐑𝐘 The 𝗴𝗲𝘁𝗼𝗽𝘁() function appeared in 4.3BSD. 𝐁𝐔𝐆𝐒 The 𝗴𝗲𝘁𝗼𝗽𝘁() function was once specified to return EOF instead of -1. This was changed by IEEE Std 1003.2-1992 (“POSIX.2”) to decou‐ ple 𝗴𝗲𝘁𝗼𝗽𝘁() from <s̲t̲d̲i̲o̲.̲h̲>. It is possible to handle digits as option letters. This allows 𝗴𝗲𝘁𝗼𝗽𝘁() to be used with programs that expect a number (“-3”) as an option. This practice is wrong, and should not be used in any cur‐ rent development. It is provided for backward compatibility o̲n̲l̲y̲. The following code fragment works in most cases and can handle mixed number and letter arguments. 
int aflag = 0, bflag = 0, ch, lastch = '\0'; int length = -1, newarg = 1, prevoptind = 1; while ((ch = getopt(argc, argv, "0123456789ab")) != -1) { switch (ch) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': if (newarg || !isdigit(lastch)) length = 0; else if (length > INT_MAX / 10) usage(); length = (length * 10) + (ch - '0'); break; case 'a': aflag = 1; break; case 'b': bflag = 1; break; default: usage(); } lastch = ch; newarg = optind != prevoptind; prevoptind = optind; } COSMOPOLITAN January 4, 2016 BSD ──────────────────────────────────────────────────────────────────────────── GETOPT_LONG(3) Cosmopolitan Library Functions Manual GETOPT_LONG(3) 𝐍𝐀𝐌𝐄 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴, 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴_𝗼𝗻𝗹𝘆 — get long options from command line argument list 𝐒𝐘𝐍𝐎𝐏𝐒𝐈𝐒 #𝗶𝗻𝗰𝗹𝘂𝗱𝗲 <𝗴𝗲𝘁𝗼𝗽𝘁.𝗵> e̲x̲t̲e̲r̲n̲ c̲h̲a̲r̲ *̲o̲p̲t̲a̲r̲g̲;̲ e̲x̲t̲e̲r̲n̲ i̲n̲t̲ o̲p̲t̲i̲n̲d̲;̲ e̲x̲t̲e̲r̲n̲ i̲n̲t̲ o̲p̲t̲o̲p̲t̲;̲ e̲x̲t̲e̲r̲n̲ i̲n̲t̲ o̲p̲t̲e̲r̲r̲;̲ e̲x̲t̲e̲r̲n̲ i̲n̲t̲ o̲p̲t̲r̲e̲s̲e̲t̲;̲ i̲n̲t̲ 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴(i̲n̲t̲ a̲r̲g̲c̲, c̲h̲a̲r̲ *̲ c̲o̲n̲s̲t̲ *̲a̲r̲g̲v̲, c̲o̲n̲s̲t̲ c̲h̲a̲r̲ *̲o̲p̲t̲s̲t̲r̲i̲n̲g̲, c̲o̲n̲s̲t̲ s̲t̲r̲u̲c̲t̲ o̲p̲t̲i̲o̲n̲ *̲l̲o̲n̲g̲o̲p̲t̲s̲, i̲n̲t̲ *̲l̲o̲n̲g̲i̲n̲d̲e̲x̲); i̲n̲t̲ 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴_𝗼𝗻𝗹𝘆(i̲n̲t̲ a̲r̲g̲c̲, c̲h̲a̲r̲ *̲ c̲o̲n̲s̲t̲ *̲a̲r̲g̲v̲, c̲o̲n̲s̲t̲ c̲h̲a̲r̲ *̲o̲p̲t̲s̲t̲r̲i̲n̲g̲, c̲o̲n̲s̲t̲ s̲t̲r̲u̲c̲t̲ o̲p̲t̲i̲o̲n̲ *̲l̲o̲n̲g̲o̲p̲t̲s̲, i̲n̲t̲ *̲l̲o̲n̲g̲i̲n̲d̲e̲x̲); 𝐃𝐄𝐒𝐂𝐑𝐈𝐏𝐓𝐈𝐎𝐍 The 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴() function is similar to getopt(3) but it accepts options in two forms: words and characters. The 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴() function provides a superset of the functionality of getopt(3). 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴() can be used in two ways. In the first way, every long option understood by the program has a corresponding short option, and the option structure is only used to translate from long options to short options. When used in this fashion, 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴() behaves identically to getopt(3). This is a good way to add long option processing to an existing program with the mini‐ mum of rewriting. In the second mechanism, a long option sets a flag in the o̲p̲t̲i̲o̲n̲ structure passed, or will store a pointer to the command line argu‐ ment in the o̲p̲t̲i̲o̲n̲ structure passed to it for options that take arguments. Additionally, the long option's argument may be speci‐ fied as a single argument with an equal sign, e.g. $ myprogram --myoption=somevalue When a long option is processed, the call to 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴() will return 0. For this reason, long option processing without short‐ cuts is not backwards compatible with getopt(3). It is possible to combine these methods, providing for long options processing with short option equivalents for some options. Less frequently used options would be processed as long options only. Abbreviated long option names are accepted when 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴() pro‐ cesses long options if the abbreviation is unique. An exact match is always preferred for a defined long option. The 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴() call requires an array to be initialized describ‐ ing the long options. Each element of the array is a structure: struct option { char *name; int has_arg; int *flag; int val; }; The n̲a̲m̲e̲ field should contain the option name without the leading double dash. The h̲a̲s̲_a̲r̲g̲ field should be one of: no_argument no argument to the option is expected. required_argument an argument to the option is required. optional_argument an argument to the option may be pre‐ sented. If f̲l̲a̲g̲ is not NULL, then the integer pointed to by it will be set to the value in the v̲a̲l̲ field. 
If the f̲l̲a̲g̲ field is NULL, then the v̲a̲l̲ field will be returned. Setting f̲l̲a̲g̲ to NULL and setting v̲a̲l̲ to the corresponding short option will make this function act just like getopt(3). If the l̲o̲n̲g̲i̲n̲d̲e̲x̲ field is not NULL, then the integer pointed to by it will be set to the index of the long option relative to l̲o̲n̲g̲o̲p̲t̲s̲. The last element of the l̲o̲n̲g̲o̲p̲t̲s̲ array has to be filled with zeroes. The 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴_𝗼𝗻𝗹𝘆() function behaves identically to 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴() with the exception that long options may start with ‘-’ in addition to ‘--’. If an option starting with ‘-’ does not match a long option but does match a single-character option, the single-character option is returned. 𝐑𝐄𝐓𝐔𝐑𝐍 𝐕𝐀𝐋𝐔𝐄𝐒 If the f̲l̲a̲g̲ field in struct option is NULL, 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴() and 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴_𝗼𝗻𝗹𝘆() return the value specified in the v̲a̲l̲ field, which is usually just the corresponding short option. If f̲l̲a̲g̲ is not NULL, these functions return 0 and store v̲a̲l̲ in the location pointed to by f̲l̲a̲g̲. These functions return ‘:’ if there was a missing option argument, ‘?’ if the user specified an unknown or ambiguous option, and -1 when the argument list has been exhausted. 𝐈𝐌𝐏𝐋𝐄𝐌𝐄𝐍𝐓𝐀𝐓𝐈𝐎𝐍 𝐃𝐈𝐅𝐅𝐄𝐑𝐄𝐍𝐂𝐄𝐒 This section describes differences to the GNU implementation found in glibc-2.1.3: · handling of ‘-’ within the option string (not the first charac‐ ter): GNU treats a ‘-’ on the command line as a non-argument. OpenBSD a ‘-’ within the option string matches a ‘-’ (single dash) on the command line. This functionality is pro‐ vided for backward compatibility with programs, such as su(1), that use ‘-’ as an option flag. This prac‐ tice is wrong, and should not be used in any current development. · handling of ‘::’ in the option string in the presence of POSIXLY_CORRECT: Both GNU and OpenBSD ignore POSIXLY_CORRECT here and take ‘::’ to mean the preceding option takes an optional argument. · return value in case of missing argument if first character (after ‘+’ or ‘-’) in the option string is not ‘:’: GNU returns ‘?’ OpenBSD returns ‘:’ (since OpenBSD's getopt(3) does). · handling of ‘--a’ in getopt(3): GNU parses this as option ‘-’, option ‘a’. OpenBSD parses this as ‘--’, and returns -1 (ignoring the ‘a’) (because the original 𝗴𝗲𝘁𝗼𝗽𝘁() did.) · setting of o̲p̲t̲o̲p̲t̲ for long options with f̲l̲a̲g̲ non-NULL: GNU sets o̲p̲t̲o̲p̲t̲ to v̲a̲l̲. OpenBSD sets o̲p̲t̲o̲p̲t̲ to 0 (since v̲a̲l̲ would never be returned). · handling of ‘-W’ with ‘W;’ in the option string in getopt(3) (not 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴()): GNU causes a segmentation fault. OpenBSD no special handling is done; ‘W;’ is interpreted as two separate options, neither of which take an argu‐ ment. · setting of o̲p̲t̲a̲r̲g̲ for long options without an argument that are invoked via ‘-W’ (with ‘W;’ in the option string): GNU sets o̲p̲t̲a̲r̲g̲ to the option name (the argument of ‘-W’). OpenBSD sets o̲p̲t̲a̲r̲g̲ to NULL (the argument of the long option). · handling of ‘-W’ with an argument that is not (a prefix to) a known long option (with ‘W;’ in the option string): GNU returns ‘-W’ with o̲p̲t̲a̲r̲g̲ set to the unknown option. OpenBSD treats this as an error (unknown option) and returns ‘?’ with o̲p̲t̲o̲p̲t̲ set to 0 and o̲p̲t̲a̲r̲g̲ set to NULL (as GNU's man page documents). · The error messages are different. · OpenBSD does not permute the argument vector at the same points in the calling sequence as GNU does. 
The aspects normally used by the caller (ordering after -1 is returned, value of o̲p̲t̲i̲n̲d̲ relative to current positions) are the same, though. (We do fewer variable swaps.) 𝐄𝐍𝐕𝐈𝐑𝐎𝐍𝐌𝐄𝐍𝐓 POSIXLY_CORRECT If set, option processing stops when the first non-option is found and a leading ‘+’ in the o̲p̲t̲s̲t̲r̲i̲n̲g̲ is ignored. 𝐄𝐗𝐀𝐌𝐏𝐋𝐄𝐒 int bflag, ch, fd; int daggerset; /* options descriptor */ static struct option longopts[] = { { "buffy", no_argument, NULL, 'b' }, { "fluoride", required_argument, NULL, 'f' }, { "daggerset", no_argument, &daggerset, 1 }, { NULL, 0, NULL, 0 } }; bflag = 0; while ((ch = getopt_long(argc, argv, "bf:", longopts, NULL)) != -1) switch (ch) { case 'b': bflag = 1; break; case 'f': if ((fd = open(optarg, O_RDONLY, 0)) == -1) err(1, "unable to open %s", optarg); break; case 0: if (daggerset) fprintf(stderr, "Buffy will use her dagger to " "apply fluoride to dracula's teeth\n"); break; default: usage(); } argc -= optind; argv += optind; 𝐒𝐄𝐄 𝐀𝐋𝐒𝐎 getopt(3) 𝐇𝐈𝐒𝐓𝐎𝐑𝐘 The 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴() and 𝗴𝗲𝘁𝗼𝗽𝘁_𝗹𝗼𝗻𝗴_𝗼𝗻𝗹𝘆() functions first appeared in GNU libiberty. This implementation first appeared in OpenBSD 3.3. 𝐁𝐔𝐆𝐒 The a̲r̲g̲v̲ argument is not really const as its elements may be per‐ muted (unless POSIXLY_CORRECT is set). COSMOPOLITAN January 4, 2016 BSD
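     A further fragment (an editor's sketch, not part of the original
     manual) illustrates the leading ‘:’ behaviour described in RETURN
     VALUES and DIAGNOSTICS above: with an optstring of ":bf:", a
     missing option argument yields ‘:’ rather than ‘?’, error messages
     are suppressed, and optopt identifies the offending option, so the
     two error cases can be reported separately.

           int ch;
           while ((ch = getopt(argc, argv, ":bf:")) != -1) {
                   switch (ch) {
                   case 'b':
                           /* flag option */
                           break;
                   case 'f':
                           /* optarg holds the file name */
                           break;
                   case ':':
                           fprintf(stderr, "missing argument for -%c\n", optopt);
                           usage();
                           break;
                   case '?':
                   default:
                           fprintf(stderr, "unknown option -%c\n", optopt);
                           usage();
                   }
           }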
20,918
419
jart/cosmopolitan
false
cosmopolitan/third_party/getopt/getopt.mk
#-*-mode:makefile-gmake;indent-tabs-mode:t;tab-width:8;coding:utf-8-*-┐ #───vi: set et ft=make ts=8 tw=8 fenc=utf-8 :vi───────────────────────┘ PKGS += THIRD_PARTY_GETOPT THIRD_PARTY_GETOPT_ARTIFACTS += THIRD_PARTY_GETOPT_A THIRD_PARTY_GETOPT = $(THIRD_PARTY_GETOPT_A_DEPS) $(THIRD_PARTY_GETOPT_A) THIRD_PARTY_GETOPT_A = o/$(MODE)/third_party/getopt/getopt.a THIRD_PARTY_GETOPT_A_FILES := $(wildcard third_party/getopt/*) THIRD_PARTY_GETOPT_A_HDRS = $(filter %.h,$(THIRD_PARTY_GETOPT_A_FILES)) THIRD_PARTY_GETOPT_A_SRCS = $(filter %.c,$(THIRD_PARTY_GETOPT_A_FILES)) THIRD_PARTY_GETOPT_A_OBJS = $(THIRD_PARTY_GETOPT_A_SRCS:%.c=o/$(MODE)/%.o) THIRD_PARTY_GETOPT_A_CHECKS = \ $(THIRD_PARTY_GETOPT_A).pkg \ $(THIRD_PARTY_GETOPT_A_HDRS:%=o/$(MODE)/%.ok) THIRD_PARTY_GETOPT_A_DIRECTDEPS = \ LIBC_CALLS \ LIBC_INTRIN \ LIBC_NEXGEN32E \ LIBC_STR \ LIBC_STUBS THIRD_PARTY_GETOPT_A_DEPS := \ $(call uniq,$(foreach x,$(THIRD_PARTY_GETOPT_A_DIRECTDEPS),$($(x)))) $(THIRD_PARTY_GETOPT_A): \ third_party/getopt/ \ $(THIRD_PARTY_GETOPT_A).pkg \ $(THIRD_PARTY_GETOPT_A_OBJS) $(THIRD_PARTY_GETOPT_A).pkg: \ $(THIRD_PARTY_GETOPT_A_OBJS) \ $(foreach x,$(THIRD_PARTY_GETOPT_A_DIRECTDEPS),$($(x)_A).pkg) THIRD_PARTY_GETOPT_LIBS = $(foreach x,$(THIRD_PARTY_GETOPT_ARTIFACTS),$($(x))) THIRD_PARTY_GETOPT_SRCS = $(foreach x,$(THIRD_PARTY_GETOPT_ARTIFACTS),$($(x)_SRCS)) THIRD_PARTY_GETOPT_HDRS = $(foreach x,$(THIRD_PARTY_GETOPT_ARTIFACTS),$($(x)_HDRS)) THIRD_PARTY_GETOPT_CHECKS = $(foreach x,$(THIRD_PARTY_GETOPT_ARTIFACTS),$($(x)_CHECKS)) THIRD_PARTY_GETOPT_OBJS = $(foreach x,$(THIRD_PARTY_GETOPT_ARTIFACTS),$($(x)_OBJS)) $(THIRD_PARTY_GETOPT_OBJS): third_party/getopt/getopt.mk .PHONY: o/$(MODE)/third_party/getopt o/$(MODE)/third_party/getopt: $(THIRD_PARTY_GETOPT_CHECKS)
1,887
46
jart/cosmopolitan
false
cosmopolitan/third_party/getopt/getopt.3
.\" Copyright (c) 1988, 1991, 1993 .\" The Regents of the University of California. All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 3. Neither the name of the University nor the names of its contributors .\" may be used to endorse or promote products derived from this software .\" without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $OpenBSD: getopt.3,v 1.46 2016/01/04 19:43:13 tb Exp $ .\" .Dd $Mdocdate: January 4 2016 $ .Dt GETOPT 3 .Os .Sh NAME .Nm getopt .Nd get option character from command line argument list .Sh SYNOPSIS .In unistd.h .Vt extern char *optarg; .Vt extern int opterr; .Vt extern int optind; .Vt extern int optopt; .Vt extern int optreset; .Ft int .Fn getopt "int argc" "char * const *argv" "const char *optstring" .Sh DESCRIPTION The .Fn getopt function incrementally parses a command line argument list .Fa argv and returns the next .Em known option character. An option character is .Em known if it has been specified in the string of accepted option characters, .Fa optstring . .Pp The option string .Fa optstring may contain the following elements: individual characters, characters followed by a colon, and characters followed by two colons. A character followed by a single colon indicates that an argument is to follow the option on the command line. Two colons indicates that the argument is optional \- this is an extension not covered by POSIX. For example, an option string .Qq x recognizes an option .Fl x , and an option string .Qq Li x: recognizes an option and argument .Fl x Ar argument . It does not matter to .Fn getopt if a following argument has leading whitespace; except in the case where the argument is optional, denoted with two colons, no leading whitespace is permitted. .Pp On return from .Fn getopt , .Va optarg points to an option argument, if it is anticipated, and the variable .Va optind contains the index to the next .Fa argv argument for a subsequent call to .Fn getopt . .Pp The variables .Va opterr and .Va optind are both initialized to 1. The .Va optind variable may be set to another value larger than 0 before a set of calls to .Fn getopt in order to skip over more or less .Fa argv entries. An .Va optind value of 0 is reserved for compatibility with GNU .Fn getopt . 
.Pp In order to use .Fn getopt to evaluate multiple sets of arguments, or to evaluate a single set of arguments multiple times, the variable .Va optreset must be set to 1 before the second and each additional set of calls to .Fn getopt , and the variable .Va optind must be reinitialized. .Pp The .Fn getopt function returns \-1 when the argument list is exhausted. The interpretation of options in the argument list may be cancelled by the option .Ql -- (double dash) which causes .Fn getopt to signal the end of argument processing and return \-1. When all options have been processed (i.e., up to the first non-option argument), .Fn getopt returns \-1. .Sh RETURN VALUES The .Fn getopt function returns the next known option character in .Fa optstring . If .Fn getopt encounters a character not found in .Fa optstring or if it detects a missing option argument, it returns .Sq \&? (question mark). If .Fa optstring has a leading .Sq \&: then a missing option argument causes .Sq \&: to be returned instead of .Sq \&? . In either case, the variable .Va optopt is set to the character that caused the error. The .Fn getopt function returns \-1 when the argument list is exhausted. .Sh EXAMPLES The following code accepts the options .Fl b and .Fl f Ar argument and adjusts .Va argc and .Va argv after option argument processing has completed. .Bd -literal -offset indent int bflag, ch, fd; bflag = 0; while ((ch = getopt(argc, argv, "bf:")) != -1) { switch (ch) { case 'b': bflag = 1; break; case 'f': if ((fd = open(optarg, O_RDONLY, 0)) == -1) err(1, "%s", optarg); break; default: usage(); } } argc -= optind; argv += optind; .Ed .Sh DIAGNOSTICS If the .Fn getopt function encounters a character not found in the string .Fa optstring or detects a missing option argument, it writes an error message to .Em stderr and returns .Ql \&? . Setting .Va opterr to a zero will disable these error messages. If .Fa optstring has a leading .Ql \&: then a missing option argument causes a .Ql \&: to be returned in addition to suppressing any error messages. .Pp Option arguments are allowed to begin with .Ql - ; this is reasonable but reduces the amount of error checking possible. .Sh SEE ALSO .Xr getopt 1 , .Xr getopt_long 3 , .Xr getsubopt 3 .Sh STANDARDS The .Fn getopt function implements a superset of the functionality specified by .St -p1003.1 . .Pp The following extensions are supported: .Bl -bullet .It The .Va optreset variable was added to make it possible to call the .Fn getopt function multiple times. .It If the .Va optind variable is set to 0, .Fn getopt will behave as if the .Va optreset variable has been set. This is for compatibility with .Tn GNU .Fn getopt . New code should use .Va optreset instead. .It If the first character of .Fa optstring is a plus sign .Pq Ql + , it will be ignored. This is for compatibility with .Tn GNU .Fn getopt . .It If the first character of .Fa optstring is a dash .Pq Ql - , non-options will be returned as arguments to the option character .Ql \e1 . This is for compatibility with .Tn GNU .Fn getopt . .It A single dash .Pq Ql - may be specified as a character in .Fa optstring , however it should .Em never have an argument associated with it. This allows .Fn getopt to be used with programs that expect .Ql - as an option flag. This practice is wrong, and should not be used in any current development. It is provided for backward compatibility .Em only . 
Care should be taken not to use .Ql - as the first character in .Fa optstring to avoid a semantic conflict with .Tn GNU .Fn getopt semantics (see above). By default, a single dash causes .Fn getopt to return \-1. .El .Pp Historic .Bx versions of .Fn getopt set .Fa optopt to the last option character processed. However, this conflicts with .St -p1003.1 which stipulates that .Fa optopt be set to the last character that caused an error. .Sh HISTORY The .Fn getopt function appeared in .Bx 4.3 . .Sh BUGS The .Fn getopt function was once specified to return .Dv EOF instead of \-1. This was changed by .St -p1003.2-92 to decouple .Fn getopt from .In stdio.h . .Pp It is possible to handle digits as option letters. This allows .Fn getopt to be used with programs that expect a number .Pq Dq Li \-3 as an option. This practice is wrong, and should not be used in any current development. It is provided for backward compatibility .Em only . The following code fragment works in most cases and can handle mixed number and letter arguments. .Bd -literal -offset indent int aflag = 0, bflag = 0, ch, lastch = '\e0'; int length = -1, newarg = 1, prevoptind = 1; while ((ch = getopt(argc, argv, "0123456789ab")) != -1) { switch (ch) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': if (newarg || !isdigit(lastch)) length = 0; else if (length > INT_MAX / 10) usage(); length = (length * 10) + (ch - '0'); break; case 'a': aflag = 1; break; case 'b': bflag = 1; break; default: usage(); } lastch = ch; newarg = optind != prevoptind; prevoptind = optind; } .Ed
8,608
364
jart/cosmopolitan
false
cosmopolitan/third_party/getopt/getsubopt.c
/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│ │vi: set et ft=c ts=8 tw=8 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright (c) 1990, 1993 │ │ The Regents of the University of California. All rights reserved. │ │ │ │ Redistribution and use in source and binary forms, with or without │ │ modification, are permitted provided that the following conditions │ │ are met: │ │ 1. Redistributions of source code must retain the above copyright │ │ notice, this list of conditions and the following disclaimer. │ │ 2. Redistributions in binary form must reproduce the above copyright │ │ notice, this list of conditions and the following disclaimer in the │ │ documentation and/or other materials provided with the distribution. │ │ 3. Neither the name of the University nor the names of its contributors │ │ may be used to endorse or promote products derived from this software │ │ without specific prior written permission. │ │ │ │ THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND │ │ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE │ │ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE │ │ ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE │ │ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL │ │ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS │ │ OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) │ │ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT │ │ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY │ │ OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF │ │ SUCH DAMAGE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/str/str.h" #include "third_party/getopt/getopt.h" // clang-format off /* * The SVID interface to getsubopt provides no way of figuring out which * part of the suboptions list wasn't matched. This makes error messages * tricky... The extern variable suboptarg is a pointer to the token * which didn't match. */ char *suboptarg; int getsubopt(char **optionp, char * const *tokens, char **valuep) { int cnt; char *p; suboptarg = *valuep = NULL; if (!optionp || !*optionp) return(-1); /* skip leading white-space, commas */ for (p = *optionp; *p && (*p == ',' || *p == ' ' || *p == '\t'); ++p); if (!*p) { *optionp = p; return(-1); } /* save the start of the token, and skip the rest of the token. */ for (suboptarg = p; *++p && *p != ',' && *p != '=' && *p != ' ' && *p != '\t';); if (*p) { /* * If there's an equals sign, set the value pointer, and * skip over the value part of the token. Terminate the * token. */ if (*p == '=') { *p = '\0'; for (*valuep = ++p; *p && *p != ',' && *p != ' ' && *p != '\t'; ++p); if (*p) *p++ = '\0'; } else *p++ = '\0'; /* Skip any whitespace or commas after this token. */ for (; *p && (*p == ',' || *p == ' ' || *p == '\t'); ++p); } /* set optionp for next round. */ *optionp = p; for (cnt = 0; *tokens; ++tokens, ++cnt) if (!strcmp(suboptarg, *tokens)) return(cnt); return(-1); }
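/*
 * Editor's sketch (not part of the original file): the usual calling
 * pattern for getsubopt(), here parsing a comma separated string such
 * as a mount(8) style "-o" argument. The token names are illustrative.
 */
#include "libc/mem/mem.h"
#include "libc/stdio/stdio.h"
#include "libc/str/str.h"
#include "third_party/getopt/getopt.h"

extern char *suboptarg;  /* token that failed to match, see above */

int main(void) {
  enum { RO, SIZE };  /* order must mirror tokens[] */
  char *const tokens[] = {"ro", "size", NULL};
  char *value, *subopts, *tofree;
  /* getsubopt() writes NUL bytes into its input, so copy it first */
  if (!(tofree = subopts = strdup("ro,size=512"))) return 1;
  while (*subopts) {
    switch (getsubopt(&subopts, tokens, &value)) {
      case RO:
        printf("read only\n");
        break;
      case SIZE:
        printf("size=%s\n", value ? value : "(missing)");
        break;
      default:
        printf("unknown suboption: %s\n", suboptarg);
        break;
    }
  }
  free(tofree);
}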
4,283
92
jart/cosmopolitan
false
cosmopolitan/third_party/getopt/getopt.h
#ifndef COSMOPOLITAN_THIRD_PARTY_GETOPT_GETOPT_H_ #define COSMOPOLITAN_THIRD_PARTY_GETOPT_GETOPT_H_ #define no_argument 0 #define required_argument 1 #define optional_argument 2 #if !(__ASSEMBLER__ + __LINKER__ + 0) COSMOPOLITAN_C_START_ extern char *optarg; extern int optind, opterr, optopt, optreset; int getopt(int, char *const[], const char *); int getsubopt(char **, char *const *, char **); struct option { const char *name; int has_arg; int *flag; int val; }; int getopt_long(int, char *const *, const char *, const struct option *, int *); int getopt_long_only(int, char *const *, const char *, const struct option *, int *); COSMOPOLITAN_C_END_ #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */ #endif /* COSMOPOLITAN_THIRD_PARTY_GETOPT_GETOPT_H_ */
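/*
 * Editor's sketch (not part of the original header): using the
 * optional_argument constant declared above with getopt_long(3). Per
 * the manual, an optional argument is attached with an equals sign
 * ("--level=3"); a bare "--level" leaves optarg NULL. The option name
 * "level" is illustrative only.
 */
#include "libc/fmt/conv.h"
#include "libc/stdio/stdio.h"
#include "third_party/getopt/getopt.h"

int main(int argc, char *argv[]) {
  int ch, level = 0;
  static const struct option longopts[] = {
      {"level", optional_argument, NULL, 'l'},
      {NULL, 0, NULL, 0},  /* table must end with zeroes */
  };
  while ((ch = getopt_long(argc, argv, "", longopts, NULL)) != -1) {
    switch (ch) {
      case 'l':
        level = optarg ? atoi(optarg) : 1;  /* default when omitted */
        break;
      default:
        fprintf(stderr, "usage: %s [--level[=n]]\n", argv[0]);
        return 1;
    }
  }
  printf("level=%d\n", level);
  return 0;
}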
799
31
jart/cosmopolitan
false
cosmopolitan/third_party/getopt/getopt.c
/* $NetBSD: getopt.c,v 1.26 2003/08/07 16:43:40 agc Exp $ */ /* * Copyright (c) 1987, 1993, 1994 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)getopt.c 8.3 (Berkeley) 4/27/95 * $FreeBSD: src/lib/libc/stdlib/getopt.c,v 1.8 2007/01/09 00:28:10 imp Exp $ * $DragonFly: src/lib/libc/stdlib/getopt.c,v 1.7 2005/11/20 12:37:48 swildner */ #include "libc/calls/calls.h" #include "libc/runtime/runtime.h" #include "libc/str/str.h" asm(".ident\t\"\\n\ getopt (BSD-3)\\n\ Copyright 1987, 1993, 1994 The Regents of the University of California\""); asm(".include \"libc/disclaimer.inc\""); #define BADCH (int)'?' #define BADARG (int)':' /** * If error message should be printed. * @see getopt() */ int opterr; /** * Index into parent argv vector. * @see getopt() */ int optind; /** * Character checked for validity. * @see getopt() */ int optopt; /** * Reset getopt. * @see getopt() */ int optreset; /** * Argument associated with option. * @see getopt() */ char *optarg; char *getopt_place; static char kGetoptEmsg[1]; static void getopt_print_badch(const char *s) { char b1[512]; char b2[8] = " -- "; b1[0] = 0; if (program_invocation_name) { strlcat(b1, program_invocation_name, sizeof(b1)); strlcat(b1, ": ", sizeof(b1)); } strlcat(b1, s, sizeof(b1)); b2[4] = optopt; b2[5] = '\n'; b2[6] = 0; strlcat(b1, b2, sizeof(b1)); write(2, b1, strlen(b1)); } /** * Parses argc/argv argument vector, e.g. * * while ((opt = getopt(argc, argv, "hvx:")) != -1) { * switch (opt) { * case 'x': * x = atoi(optarg); * break; * case 'v': * ++verbose; * break; * case 'h': * PrintUsage(EXIT_SUCCESS, stdout); * default: * PrintUsage(EX_USAGE, stderr); * } * } * * @see optind * @see optarg */ int getopt(int nargc, char *const nargv[], const char *ostr) { char *oli; /* option letter list index */ static bool once; if (!once) { opterr = 1; optind = 1; getopt_place = kGetoptEmsg; once = true; } /* * Some programs like cvs expect optind = 0 to trigger * a reset of getopt. 
*/ if (optind == 0) optind = 1; if (optreset || *getopt_place == 0) { /* update scanning pointer */ optreset = 0; getopt_place = nargv[optind]; if (optind >= nargc || *getopt_place++ != '-') { /* Argument is absent or is not an option */ getopt_place = kGetoptEmsg; return -1; } optopt = *getopt_place++; if (optopt == '-' && *getopt_place == 0) { /* "--" => end of options */ ++optind; getopt_place = kGetoptEmsg; return -1; } if (optopt == 0) { /* Solitary '-', treat as a '-' option if the program (eg su) is looking for it. */ getopt_place = kGetoptEmsg; if (strchr(ostr, '-') == NULL) return -1; optopt = '-'; } } else { optopt = *getopt_place++; } /* See if option letter is one the caller wanted... */ if (optopt == ':' || (oli = strchr(ostr, optopt)) == NULL) { if (*getopt_place == 0) ++optind; if (opterr && *ostr != ':') getopt_print_badch("illegal option"); return BADCH; } /* Does this option need an argument? */ if (oli[1] != ':') { /* don't need argument */ optarg = NULL; if (*getopt_place == 0) ++optind; } else { /* Option-argument is either the rest of this argument or the entire next argument. */ if (*getopt_place) { optarg = getopt_place; } else if (nargc > ++optind) { optarg = nargv[optind]; } else { /* option-argument absent */ getopt_place = kGetoptEmsg; if (*ostr == ':') return BADARG; if (opterr) getopt_print_badch("option requires an argument"); return BADCH; } getopt_place = kGetoptEmsg; ++optind; } return optopt; /* return option letter */ }
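/*
 * Editor's sketch (not part of the original file): evaluating two
 * argument vectors in one process using the reset protocol from the
 * manual, i.e. set optreset = 1 and reinitialize optind before the
 * second set of calls.
 */
#include "libc/stdio/stdio.h"
#include "third_party/getopt/getopt.h"

static void Scan(int argc, char *const argv[]) {
  int ch;
  optreset = 1;  /* discard getopt()'s internal scan state */
  optind = 1;    /* restart at the first argument */
  while ((ch = getopt(argc, argv, "ab:")) != -1) {
    if (ch == 'b') {
      printf("-b %s\n", optarg);
    } else if (ch == 'a') {
      printf("-a\n");
    }
  }
}

int main(void) {
  char *const argv1[] = {"prog", "-a", NULL};
  char *const argv2[] = {"prog", "-b", "xyzzy", NULL};
  Scan(2, argv1);  /* prints "-a" */
  Scan(3, argv2);  /* prints "-b xyzzy" */
}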
5,418
185
jart/cosmopolitan
false
cosmopolitan/third_party/tr/next.c
/* $OpenBSD: str.c,v 1.14 2021/11/02 03:09:15 cheloha Exp $ */ /* $NetBSD: str.c,v 1.7 1995/08/31 22:13:47 jtc Exp $ */ /*- * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "libc/assert.h" #include "libc/calls/typedef/u.h" #include "libc/fmt/conv.h" #include "libc/log/bsd.h" #include "libc/mem/alg.h" #include "libc/mem/mem.h" #include "libc/str/str.h" #include "third_party/tr/extern.h" // clang-format off static int backslash(STR *); static int bracket(STR *); static int c_class(const void *, const void *); static void genclass(STR *); static void genequiv(STR *); static int genrange(STR *); static void genseq(STR *); int next(s) STR *s; { int ch; switch (s->state) { case EOS: return (0); case INFINITE: return (1); case NORMAL: switch (ch = *s->str) { case '\0': s->state = EOS; return (0); case '\\': s->lastch = backslash(s); break; case '[': if (bracket(s)) return (next(s)); /* FALLTHROUGH */ default: ++s->str; s->lastch = ch; break; } /* We can start a range at any time. 
*/ if (s->str[0] == '-' && genrange(s)) return (next(s)); return (1); case RANGE: if (s->cnt-- == 0) { s->state = NORMAL; return (next(s)); } ++s->lastch; return (1); case SEQUENCE: if (s->cnt-- == 0) { s->state = NORMAL; return (next(s)); } return (1); case SET: if ((s->lastch = s->set[s->cnt++]) == OOBCH) { s->state = NORMAL; return (next(s)); } return (1); default: return 0; } /* NOTREACHED */ } static int bracket(s) STR *s; { char *p; switch (s->str[1]) { case ':': /* "[:class:]" */ if ((p = strstr((char *)s->str + 2, ":]")) == NULL) return (0); *p = '\0'; s->str += 2; genclass(s); s->str = (unsigned char *)p + 2; return (1); case '=': /* "[=equiv=]" */ if ((p = strstr((char *)s->str + 2, "=]")) == NULL) return (0); s->str += 2; genequiv(s); return (1); default: /* "[\###*n]" or "[#*n]" */ if ((p = strpbrk((char *)s->str + 2, "*]")) == NULL) return (0); if (p[0] != '*' || strchr(p, ']') == NULL) return (0); s->str += 1; genseq(s); return (1); } /* NOTREACHED */ } typedef struct { char *name; int (*func)(int); int *set; } CLASS; static CLASS classes[] = { { "alnum", isalnum, }, { "alpha", isalpha, }, { "blank", isblank, }, { "cntrl", iscntrl, }, { "digit", isdigit, }, { "graph", isgraph, }, { "lower", islower, }, { "print", isprint, }, { "punct", ispunct, }, { "space", isspace, }, { "upper", isupper, }, { "xdigit", isxdigit, }, }; static void genclass(STR *s) { CLASS *cp, tmp; size_t len; int i; tmp.name = (char *)s->str; if ((cp = (CLASS *)bsearch(&tmp, classes, sizeof(classes) / sizeof(CLASS), sizeof(CLASS), c_class)) == NULL) errx(1, "unknown class %s", s->str); /* * Generate the set of characters in the class if we haven't * already done so. */ if (cp->set == NULL) { cp->set = reallocarray(NULL, NCHARS + 1, sizeof(*cp->set)); if (cp->set == NULL) err(1, NULL); len = 0; for (i = 0; i < NCHARS; i++) { if (cp->func(i)) { cp->set[len] = i; len++; } } cp->set[len] = OOBCH; len++; cp->set = reallocarray(cp->set, len, sizeof(*cp->set)); if (cp->set == NULL) err(1, NULL); } s->cnt = 0; s->state = SET; s->set = cp->set; } static int c_class(a, b) const void *a, *b; { return (strcmp(((CLASS *)a)->name, ((CLASS *)b)->name)); } /* * English doesn't have any equivalence classes, so for now * we just syntax check and grab the character. */ static void genequiv(s) STR *s; { if (*s->str == '\\') { s->equiv[0] = backslash(s); if (*s->str != '=') errx(1, "misplaced equivalence equals sign"); } else { s->equiv[0] = s->str[0]; if (s->str[1] != '=') errx(1, "misplaced equivalence equals sign"); } s->str += 2; s->cnt = 0; s->state = SET; s->set = s->equiv; } static int genrange(s) STR *s; { int stopval; unsigned char *savestart; savestart = s->str; stopval = *++s->str == '\\' ? backslash(s) : *s->str++; if (stopval < (u_char)s->lastch) { s->str = savestart; return (0); } s->cnt = stopval - s->lastch + 1; s->state = RANGE; --s->lastch; return (1); } static void genseq(s) STR *s; { char *ep; if (s->which == STRING1) errx(1, "sequences only valid in string2"); if (*s->str == '\\') s->lastch = backslash(s); else s->lastch = *s->str++; if (*s->str != '*') errx(1, "misplaced sequence asterisk"); switch (*++s->str) { case '\\': s->cnt = backslash(s); break; case ']': s->cnt = 0; ++s->str; break; default: if (isdigit(*s->str)) { s->cnt = strtol((char *)s->str, &ep, 0); if (*ep == ']') { s->str = (unsigned char *)ep + 1; break; } } errx(1, "illegal sequence count"); /* NOTREACHED */ } s->state = s->cnt ? SEQUENCE : INFINITE; } /* * Translate \??? into a character. 
Up to 3 octal digits; if no digits, either
 * an escape code or a literal character.
 */
static int
backslash(STR *s)
{
	size_t i;
	int ch, val;

	_unassert(*s->str == '\\');
	s->str++;

	/* Empty escapes become plain backslashes. */
	if (*s->str == '\0') {
		s->state = EOS;
		return ('\\');
	}

	val = 0;
	for (i = 0; i < 3; i++) {
		if (s->str[i] < '0' || '7' < s->str[i])
			break;
		val = val * 8 + s->str[i] - '0';
	}
	if (i > 0) {
		if (val > UCHAR_MAX)
			errx(1, "octal value out of range: %d", val);
		s->str += i;
		return (val);
	}

	ch = *s->str++;
	switch (ch) {
	case 'a':			/* escape characters */
		return ('\7');
	case 'b':
		return ('\b');
	case 'f':
		return ('\f');
	case 'n':
		return ('\n');
	case 'r':
		return ('\r');
	case 't':
		return ('\t');
	case 'v':
		return ('\13');
	default:			/* "\x" -> "x" */
		return (ch);
	}
}
7,293
340
jart/cosmopolitan
false
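The interplay of the NORMAL, RANGE, and SET states above is easiest to see in motion. The following is a minimal, hypothetical driver (not part of the source, and assuming it is linked against str.c and the extern.h declarations) that expands a range pattern one character at a time:

#include "libc/stdio/stdio.h"
#include "third_party/tr/extern.h"

int main(void) {
  unsigned char pat[] = "a-e";  /* writable: bracket() may poke '\0' into it */
  STR s = { STRING1, NORMAL, 0, OOBCH, { 0, OOBCH }, NULL, pat };
  while (next(&s))      /* returns 0 once the pattern is exhausted */
    putchar(s.lastch);  /* prints: abcde */
  putchar('\n');
  return 0;
}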
cosmopolitan/third_party/tr/extern.h
#ifndef COSMOPOLITAN_THIRD_PARTY_TR_EXTERN_H_ #define COSMOPOLITAN_THIRD_PARTY_TR_EXTERN_H_ #include "libc/limits.h" #if !(__ASSEMBLER__ + __LINKER__ + 0) COSMOPOLITAN_C_START_ typedef struct { enum { STRING1, STRING2 } which; enum { EOS, INFINITE, NORMAL, RANGE, SEQUENCE, SET } state; int cnt; /* character count */ int lastch; /* last character */ int equiv[2]; /* equivalence set */ int *set; /* set of characters */ unsigned char *str; /* user's string */ } STR; #define NCHARS (UCHAR_MAX + 1) /* Number of possible characters. */ #define OOBCH (UCHAR_MAX + 1) /* Out of band character value. */ int next(STR *); COSMOPOLITAN_C_END_ #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */ #endif /* COSMOPOLITAN_THIRD_PARTY_TR_EXTERN_H_ */
790
25
jart/cosmopolitan
false
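A detail worth calling out: OOBCH is defined as UCHAR_MAX + 1 precisely so it can never collide with a real character value, which lets genclass() in str.c use it as a set terminator. A small illustrative sketch of that sentinel convention (the helper below is hypothetical, not from the source):

#include "libc/stdio/stdio.h"
#include "third_party/tr/extern.h"

/* Walk a character set terminated by the out-of-band sentinel,
   the same shape that genclass() produces. */
static void print_set(const int *set) {
  for (int i = 0; set[i] != OOBCH; i++)
    putchar(set[i]);
  putchar('\n');
}

int main(void) {
  int vowels[] = { 'a', 'e', 'i', 'o', 'u', OOBCH };
  print_set(vowels);  /* prints: aeiou */
  return 0;
}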
cosmopolitan/third_party/tr/tr.c
/* $OpenBSD: tr.c,v 1.21 2022/02/11 16:09:21 cheloha Exp $ */ /* $NetBSD: tr.c,v 1.5 1995/08/31 22:13:48 jtc Exp $ */ /* * Copyright (c) 1988, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "libc/calls/calls.h" #include "libc/log/bsd.h" #include "libc/runtime/runtime.h" #include "libc/stdio/stdio.h" #include "libc/str/str.h" #include "third_party/getopt/getopt.h" #include "third_party/tr/cmd.h" #include "third_party/tr/extern.h" // clang-format off int delete[NCHARS], squeeze[NCHARS]; int translate[NCHARS] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* ASCII */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, }; 
STR s1 = { STRING1, NORMAL, 0, OOBCH, { 0, OOBCH }, NULL, NULL }; STR s2 = { STRING2, NORMAL, 0, OOBCH, { 0, OOBCH }, NULL, NULL }; static void setup(int *, char *, STR *, int); static void usage(void); int _tr(int argc, char *argv[]) { int ch, cnt, lastch, *p; int cflag, dflag, sflag; if (pledge("stdio", NULL) == -1) err(1, "pledge"); cflag = dflag = sflag = 0; while ((ch = getopt(argc, argv, "Ccds")) != -1) switch(ch) { case 'C': case 'c': cflag = 1; break; case 'd': dflag = 1; break; case 's': sflag = 1; break; case '?': default: usage(); } argc -= optind; argv += optind; if (argc < 1 || argc > 2) usage(); /* * tr -ds [-Cc] string1 string2 * Delete all characters (or complemented characters) in string1. * Squeeze all characters in string2. */ if (dflag && sflag) { if (argc != 2) usage(); setup(delete, argv[0], &s1, cflag); setup(squeeze, argv[1], &s2, 0); for (lastch = OOBCH; (ch = getchar()) != EOF;) if (!delete[ch] && (!squeeze[ch] || lastch != ch)) { lastch = ch; (void)putchar(ch); } exit(0); } /* * tr -d [-Cc] string1 * Delete all characters (or complemented characters) in string1. */ if (dflag) { if (argc != 1) usage(); setup(delete, argv[0], &s1, cflag); while ((ch = getchar()) != EOF) if (!delete[ch]) (void)putchar(ch); exit(0); } /* * tr -s [-Cc] string1 * Squeeze all characters (or complemented characters) in string1. */ if (sflag && argc == 1) { setup(squeeze, argv[0], &s1, cflag); for (lastch = OOBCH; (ch = getchar()) != EOF;) if (!squeeze[ch] || lastch != ch) { lastch = ch; (void)putchar(ch); } exit(0); } /* * tr [-Ccs] string1 string2 * Replace all characters (or complemented characters) in string1 with * the character in the same position in string2. If the -s option is * specified, squeeze all the characters in string2. */ if (argc != 2) usage(); s1.str = (unsigned char *)argv[0]; s2.str = (unsigned char *)argv[1]; if (cflag) for (cnt = NCHARS, p = translate; cnt--;) *p++ = OOBCH; if (!next(&s2)) errx(1, "empty string2"); /* If string2 runs out of characters, use the last one specified. */ ch = s2.lastch; if (sflag) while (next(&s1)) { translate[s1.lastch] = ch = s2.lastch; squeeze[ch] = 1; (void)next(&s2); } else while (next(&s1)) { translate[s1.lastch] = ch = s2.lastch; (void)next(&s2); } if (cflag) for (cnt = 0, p = translate; cnt < NCHARS; ++p, ++cnt) *p = *p == OOBCH ? ch : cnt; if (sflag) for (lastch = OOBCH; (ch = getchar()) != EOF;) { ch = translate[ch]; if (!squeeze[ch] || lastch != ch) { lastch = ch; (void)putchar(ch); } } else while ((ch = getchar()) != EOF) (void)putchar(translate[ch]); exit (0); } static void setup(int *table, char *arg, STR *str, int cflag) { int cnt, *p; str->str = (unsigned char *)arg; bzero(table, NCHARS * sizeof(int)); while (next(str)) table[str->lastch] = 1; if (cflag) for (p = table, cnt = NCHARS; cnt--; ++p) *p = !*p; } static void usage(void) { (fprintf)(stderr, "usage: tr [-Ccs] string1 string2\n" " tr [-Cc] -d string1\n" " tr [-Cc] -s string1\n" " tr [-Cc] -ds string1 string2\n"); exit(1); }
6,944
241
jart/cosmopolitan
false
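The -s handling above reduces to a one-character memory: emit a byte unless it is flagged for squeezing and equals the previous output. Here is a standalone, hypothetical sketch of just that loop, with -1 standing in for OOBCH so it needs nothing from tr's headers:

#include "libc/stdio/stdio.h"

/* Collapse runs of flagged characters, like `tr -s ' '`. */
int main(void) {
  int squeeze[256] = {0}, ch, lastch = -1;  /* -1 plays OOBCH's role */
  squeeze[' '] = 1;                         /* squeeze runs of spaces */
  while ((ch = getchar()) != EOF)
    if (!squeeze[ch] || lastch != ch) {
      lastch = ch;
      putchar(ch);
    }
  return 0;
}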
cosmopolitan/third_party/tr/cmd.h
#ifndef COSMOPOLITAN_THIRD_PARTY_TR_CMD_H_ #define COSMOPOLITAN_THIRD_PARTY_TR_CMD_H_ #if !(__ASSEMBLER__ + __LINKER__ + 0) COSMOPOLITAN_C_START_ int _tr(int, char *[]); COSMOPOLITAN_C_END_ #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */ #endif /* COSMOPOLITAN_THIRD_PARTY_TR_CMD_H_ */
287
11
jart/cosmopolitan
false
cosmopolitan/third_party/tr/cmd.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│ │vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2022 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "third_party/tr/cmd.h" int main(int argc, char *argv[]) { return _tr(argc, argv); }
1,932
24
jart/cosmopolitan
false
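Because main() only forwards to _tr(), the command can also be invoked programmatically. A hedged sketch (the argument strings are illustrative); note that _tr() reads stdin, writes stdout, and finishes via exit() rather than returning:

#include "third_party/tr/cmd.h"

/* Hypothetical: uppercase stdin, equivalent to `tr a-z A-Z`. */
int main(void) {
  char *args[] = { "tr", "a-z", "A-Z", NULL };
  return _tr(3, args);  /* does not return normally; ends in exit(0) */
}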
cosmopolitan/third_party/tr/tr.mk
#-*-mode:makefile-gmake;indent-tabs-mode:t;tab-width:8;coding:utf-8-*-┐ #───vi: set et ft=make ts=8 tw=8 fenc=utf-8 :vi───────────────────────┘ PKGS += THIRD_PARTY_TR THIRD_PARTY_TR_ARTIFACTS += THIRD_PARTY_TR_A THIRD_PARTY_TR = $(THIRD_PARTY_TR_DEPS) $(THIRD_PARTY_TR_A) THIRD_PARTY_TR_A = o/$(MODE)/third_party/tr/tr.a THIRD_PARTY_TR_FILES := $(wildcard third_party/tr/*) THIRD_PARTY_TR_HDRS = $(filter %.h,$(THIRD_PARTY_TR_FILES)) THIRD_PARTY_TR_INCS = $(filter %.inc,$(THIRD_PARTY_TR_FILES)) THIRD_PARTY_TR_SRCS = $(filter %.c,$(THIRD_PARTY_TR_FILES)) THIRD_PARTY_TR_OBJS = $(THIRD_PARTY_TR_SRCS:%.c=o/$(MODE)/%.o) THIRD_PARTY_TR_DIRECTDEPS = \ LIBC_CALLS \ LIBC_FMT \ LIBC_INTRIN \ LIBC_LOG \ LIBC_MEM \ LIBC_NEXGEN32E \ LIBC_RUNTIME \ LIBC_STDIO \ LIBC_STR \ LIBC_STUBS \ THIRD_PARTY_GETOPT THIRD_PARTY_TR_DEPS := \ $(call uniq,$(foreach x,$(THIRD_PARTY_TR_DIRECTDEPS),$($(x)))) THIRD_PARTY_TR_CHECKS = \ $(THIRD_PARTY_TR_A).pkg \ $(THIRD_PARTY_TR_HDRS:%=o/$(MODE)/%.ok) $(THIRD_PARTY_TR_A): \ third_party/tr/ \ $(THIRD_PARTY_TR_A).pkg \ $(THIRD_PARTY_TR_OBJS) $(THIRD_PARTY_TR_A).pkg: \ $(THIRD_PARTY_TR_OBJS) \ $(foreach x,$(THIRD_PARTY_TR_DIRECTDEPS),$($(x)_A).pkg) o/$(MODE)/third_party/tr/tr.com.dbg: \ $(THIRD_PARTY_TR) \ o/$(MODE)/third_party/tr/tr.o \ $(CRT) \ $(APE_NO_MODIFY_SELF) @$(APELINK) THIRD_PARTY_TR_LIBS = $(THIRD_PARTY_TR_A) THIRD_PARTY_TR_BINS = $(THIRD_PARTY_TR_COMS) $(THIRD_PARTY_TR_COMS:%=%.dbg) THIRD_PARTY_TR_COMS = o/$(MODE)/third_party/tr/tr.com $(THIRD_PARTY_TR_OBJS): $(BUILD_FILES) third_party/tr/tr.mk .PHONY: o/$(MODE)/third_party/tr o/$(MODE)/third_party/tr: \ $(THIRD_PARTY_TR_BINS) \ $(THIRD_PARTY_TR_CHECKS)
1,812
60
jart/cosmopolitan
false
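The makefile follows the repository's usual package pattern: DIRECTDEPS names packages, DEPS expands them, and the .pkg targets enforce layering. A hypothetical sketch of how some other package (MY_PKG is a placeholder) would link against tr:

# Hypothetical: another package pulling in third_party/tr.
MY_PKG_DIRECTDEPS += THIRD_PARTY_TR
MY_PKG_DEPS := $(call uniq,$(foreach x,$(MY_PKG_DIRECTDEPS),$($(x))))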
cosmopolitan/third_party/lz4cli/xxhash.c
/* clang-format off */
/*
*  xxHash - Fast Hash algorithm
*  Copyright (C) 2012-2016, Yann Collet
*
*  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
*
*  Redistribution and use in source and binary forms, with or without
*  modification, are permitted provided that the following conditions are
*  met:
*
*  * Redistributions of source code must retain the above copyright
*  notice, this list of conditions and the following disclaimer.
*  * Redistributions in binary form must reproduce the above
*  copyright notice, this list of conditions and the following disclaimer
*  in the documentation and/or other materials provided with the
*  distribution.
*
*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
*  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
*  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
*  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
*  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
*  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
*  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
*  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*  You can contact the author at :
*  - xxHash homepage: http://www.xxhash.com
*  - xxHash source repository : https://github.com/Cyan4973/xxHash
*/

/* *************************************
*  Tuning parameters
***************************************/
/*!XXH_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method doesn't depend on the compiler but violates the C standard.
 *            It can generate buggy code on targets which do not support unaligned memory accesses.
 *            But in some circumstances, it's the only known way to get the most performance (i.e., GCC + ARMv6)
 *            See http://stackoverflow.com/a/32095106/646947 for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#   define XXH_FORCE_MEMORY_ACCESS 2
# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
#   define XXH_FORCE_MEMORY_ACCESS 1
# endif
#endif

/*!XXH_ACCEPT_NULL_INPUT_POINTER :
 * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
 * When this option is enabled, xxHash output for null input pointers will be the same as for a null-length input.
* By default, this option is disabled. To enable it, uncomment below define : */ /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */ /*!XXH_FORCE_NATIVE_FORMAT : * By default, xxHash library provides endian-independent Hash values, based on little-endian convention. * Results are therefore identical for little-endian and big-endian CPU. * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. * Should endian-independence be of no importance for your application, you may set the #define below to 1, * to improve speed for Big-endian CPU. * This option has no impact on Little_Endian CPU. */ #ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ # define XXH_FORCE_NATIVE_FORMAT 0 #endif /*!XXH_FORCE_ALIGN_CHECK : * This is a minor performance trick, only useful with lots of very small keys. * It means : check for aligned/unaligned input. * The check costs one initial branch per hash; set to 0 when the input data * is guaranteed to be aligned. */ #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ # if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) # define XXH_FORCE_ALIGN_CHECK 0 # else # define XXH_FORCE_ALIGN_CHECK 1 # endif #endif /* ************************************* * Includes & Memory related functions ***************************************/ /*! Modify the local functions below should you wish to use some other memory routines * for malloc(), free() */ #include "libc/mem/mem.h" static void* XXH_malloc(size_t s) { return malloc(s); } static void XXH_free (void* p) { free(p); } /*! and for memcpy() */ #include "libc/str/str.h" static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } #define XXH_STATIC_LINKING_ONLY #include "third_party/lz4cli/xxhash.h" /* ************************************* * Compiler Specific Options ***************************************/ #ifdef _MSC_VER /* Visual Studio */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ #endif #ifndef XXH_FORCE_INLINE # ifdef _MSC_VER /* Visual Studio */ # define XXH_FORCE_INLINE static __forceinline # else # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # ifdef __GNUC__ # define XXH_FORCE_INLINE static inline __attribute__((always_inline)) # else # define XXH_FORCE_INLINE static inline # endif # else # define XXH_FORCE_INLINE static # endif /* __STDC_VERSION__ */ # endif /* _MSC_VER */ #endif /* XXH_FORCE_INLINE */ /* ************************************* * Basic Types ***************************************/ #ifndef MEM_MODULE # if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) typedef uint8_t BYTE; typedef uint16_t U16; typedef uint32_t U32; typedef int32_t S32; # else typedef unsigned char BYTE; typedef unsigned short U16; typedef unsigned int U32; typedef signed int S32; # endif #endif #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) /* Force direct memory access. 
Only works on CPU which support unaligned memory access in hardware */ static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ typedef union { U32 u32; } __attribute__((packed)) unalign; static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } #else /* portable and safe solution. Generally efficient. * see : http://stackoverflow.com/a/32095106/646947 */ static U32 XXH_read32(const void* memPtr) { U32 val; memcpy(&val, memPtr, sizeof(val)); return val; } #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ /* **************************************** * Compiler-specific Functions and Macros ******************************************/ #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) /* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ #if defined(_MSC_VER) # define XXH_rotl32(x,r) _rotl(x,r) # define XXH_rotl64(x,r) _rotl64(x,r) #else # define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) # define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) #endif #if defined(_MSC_VER) /* Visual Studio */ # define XXH_swap32 _byteswap_ulong #elif XXH_GCC_VERSION >= 403 # define XXH_swap32 __builtin_bswap32 #else static U32 XXH_swap32 (U32 x) { return ((x << 24) & 0xff000000 ) | ((x << 8) & 0x00ff0000 ) | ((x >> 8) & 0x0000ff00 ) | ((x >> 24) & 0x000000ff ); } #endif /* ************************************* * Architecture Macros ***************************************/ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ #ifndef XXH_CPU_LITTLE_ENDIAN static const int g_one = 1; # define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one)) #endif /* *************************** * Memory reads *****************************/ typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; XXH_FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) { if (align==XXH_unaligned) return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); else return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); } XXH_FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) { return XXH_readLE32_align(ptr, endian, XXH_unaligned); } static U32 XXH_readBE32(const void* ptr) { return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); } /* ************************************* * Macros ***************************************/ #define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } /* ******************************************************************* * 32-bits hash functions *********************************************************************/ static const U32 PRIME32_1 = 2654435761U; static const U32 PRIME32_2 = 2246822519U; static const U32 PRIME32_3 = 3266489917U; static const U32 PRIME32_4 = 668265263U; static const U32 PRIME32_5 = 374761393U; static U32 XXH32_round(U32 seed, U32 input) { seed += input * PRIME32_2; seed = XXH_rotl32(seed, 13); seed *= PRIME32_1; return seed; } XXH_FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align) { const BYTE* p = (const BYTE*)input; const BYTE* bEnd = p + len; U32 h32; #define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) #ifdef XXH_ACCEPT_NULL_INPUT_POINTER if (p==NULL) { len=0; bEnd=p=(const BYTE*)(size_t)16; } #endif if (len>=16) { const BYTE* const limit = bEnd - 16; U32 v1 = seed + PRIME32_1 + PRIME32_2; U32 v2 = seed + PRIME32_2; U32 v3 = seed + 0; U32 v4 = seed - PRIME32_1; do { v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; } while (p<=limit); h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); } else { h32 = seed + PRIME32_5; } h32 += (U32) len; while (p+4<=bEnd) { h32 += XXH_get32bits(p) * PRIME32_3; h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; p+=4; } while (p<bEnd) { h32 += (*p) * PRIME32_5; h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; p++; } h32 ^= h32 >> 15; h32 *= PRIME32_2; h32 ^= h32 >> 13; h32 *= PRIME32_3; h32 ^= h32 >> 16; return h32; } XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) { #if 0 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ XXH32_state_t state; XXH32_reset(&state, seed); XXH32_update(&state, input, len); return XXH32_digest(&state); #else XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if (XXH_FORCE_ALIGN_CHECK) { if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); else return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); } } if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); else return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); #endif } /*====== Hash streaming ======*/ XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) { return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); } XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) { XXH_free(statePtr); return XXH_OK; } XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) { memcpy(dstState, srcState, sizeof(*dstState)); } XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) { XXH32_state_t state; /* using a local state to memcpy() in order to avoid 
strict-aliasing warnings */ memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */ state.v1 = seed + PRIME32_1 + PRIME32_2; state.v2 = seed + PRIME32_2; state.v3 = seed + 0; state.v4 = seed - PRIME32_1; memcpy(statePtr, &state, sizeof(state)); return XXH_OK; } XXH_FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) { const BYTE* p = (const BYTE*)input; const BYTE* const bEnd = p + len; #ifdef XXH_ACCEPT_NULL_INPUT_POINTER if (input==NULL) return XXH_ERROR; #endif state->total_len_32 += (unsigned)len; state->large_len |= (len>=16) | (state->total_len_32>=16); if (state->memsize + len < 16) { /* fill in tmp buffer */ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); state->memsize += (unsigned)len; return XXH_OK; } if (state->memsize) { /* some data left from previous update */ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); { const U32* p32 = state->mem32; state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++; } p += 16-state->memsize; state->memsize = 0; } if (p <= bEnd-16) { const BYTE* const limit = bEnd - 16; U32 v1 = state->v1; U32 v2 = state->v2; U32 v3 = state->v3; U32 v4 = state->v4; do { v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; } while (p<=limit); state->v1 = v1; state->v2 = v2; state->v3 = v3; state->v4 = v4; } if (p < bEnd) { XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); state->memsize = (unsigned)(bEnd-p); } return XXH_OK; } XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len) { XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH32_update_endian(state_in, input, len, XXH_littleEndian); else return XXH32_update_endian(state_in, input, len, XXH_bigEndian); } XXH_FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) { const BYTE * p = (const BYTE*)state->mem32; const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize; U32 h32; if (state->large_len) { h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); } else { h32 = state->v3 /* == seed */ + PRIME32_5; } h32 += state->total_len_32; while (p+4<=bEnd) { h32 += XXH_readLE32(p, endian) * PRIME32_3; h32 = XXH_rotl32(h32, 17) * PRIME32_4; p+=4; } while (p<bEnd) { h32 += (*p) * PRIME32_5; h32 = XXH_rotl32(h32, 11) * PRIME32_1; p++; } h32 ^= h32 >> 15; h32 *= PRIME32_2; h32 ^= h32 >> 13; h32 *= PRIME32_3; h32 ^= h32 >> 16; return h32; } XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) { XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH32_digest_endian(state_in, XXH_littleEndian); else return XXH32_digest_endian(state_in, XXH_bigEndian); } /*====== Canonical representation ======*/ /*! Default XXH result types are basic unsigned 32 and 64 bits. 
* The canonical representation follows human-readable write convention, aka big-endian (large digits first). * These functions allow transformation of hash result into and from its canonical format. * This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs. */ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) { XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); memcpy(dst, &hash, sizeof(*dst)); } XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) { return XXH_readBE32(src); } #ifndef XXH_NO_LONG_LONG /* ******************************************************************* * 64-bits hash functions *********************************************************************/ /*====== Memory access ======*/ #ifndef MEM_MODULE # define MEM_MODULE # if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) typedef uint64_t U64; # else typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */ # endif #endif #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64; static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } #else /* portable and safe solution. Generally efficient. * see : http://stackoverflow.com/a/32095106/646947 */ static U64 XXH_read64(const void* memPtr) { U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ #if defined(_MSC_VER) /* Visual Studio */ # define XXH_swap64 _byteswap_uint64 #elif XXH_GCC_VERSION >= 403 # define XXH_swap64 __builtin_bswap64 #else static U64 XXH_swap64 (U64 x) { return ((x << 56) & 0xff00000000000000ULL) | ((x << 40) & 0x00ff000000000000ULL) | ((x << 24) & 0x0000ff0000000000ULL) | ((x << 8) & 0x000000ff00000000ULL) | ((x >> 8) & 0x00000000ff000000ULL) | ((x >> 24) & 0x0000000000ff0000ULL) | ((x >> 40) & 0x000000000000ff00ULL) | ((x >> 56) & 0x00000000000000ffULL); } #endif XXH_FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) { if (align==XXH_unaligned) return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); else return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); } XXH_FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) { return XXH_readLE64_align(ptr, endian, XXH_unaligned); } static U64 XXH_readBE64(const void* ptr) { return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); } /*====== xxh64 ======*/ static const U64 PRIME64_1 = 11400714785074694791ULL; static const U64 PRIME64_2 = 14029467366897019727ULL; static const U64 PRIME64_3 = 1609587929392839161ULL; static const U64 PRIME64_4 = 9650029242287828579ULL; static const U64 PRIME64_5 = 2870177450012600261ULL; static U64 XXH64_round(U64 acc, U64 input) { acc += input * PRIME64_2; acc = XXH_rotl64(acc, 31); acc *= PRIME64_1; return acc; } static U64 XXH64_mergeRound(U64 acc, U64 val) { val = XXH64_round(0, val); acc ^= val; acc = acc * PRIME64_1 + PRIME64_4; return acc; } XXH_FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) { const BYTE* p = (const BYTE*)input; const BYTE* const bEnd = p + len; U64 h64; #define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) #ifdef XXH_ACCEPT_NULL_INPUT_POINTER if (p==NULL) { len=0; bEnd=p=(const BYTE*)(size_t)32; } #endif if (len>=32) { const BYTE* const limit = bEnd - 32; U64 v1 = seed + PRIME64_1 + PRIME64_2; U64 v2 = seed + PRIME64_2; U64 v3 = seed + 0; U64 v4 = seed - PRIME64_1; do { v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; } while (p<=limit); h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); h64 = XXH64_mergeRound(h64, v1); h64 = XXH64_mergeRound(h64, v2); h64 = XXH64_mergeRound(h64, v3); h64 = XXH64_mergeRound(h64, v4); } else { h64 = seed + PRIME64_5; } h64 += (U64) len; while (p+8<=bEnd) { U64 const k1 = XXH64_round(0, XXH_get64bits(p)); h64 ^= k1; h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; p+=8; } if (p+4<=bEnd) { h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; p+=4; } while (p<bEnd) { h64 ^= (*p) * PRIME64_5; h64 = XXH_rotl64(h64, 11) * PRIME64_1; p++; } h64 ^= h64 >> 33; h64 *= PRIME64_2; h64 ^= h64 >> 29; h64 *= PRIME64_3; h64 ^= h64 >> 32; return h64; } XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) { #if 0 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ XXH64_state_t state; XXH64_reset(&state, seed); XXH64_update(&state, input, len); return XXH64_digest(&state); #else XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if (XXH_FORCE_ALIGN_CHECK) { if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); else return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); } } if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); else return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); #endif } /*====== Hash Streaming ======*/ XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) { return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); } XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) { XXH_free(statePtr); return XXH_OK; } XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) { memcpy(dstState, srcState, sizeof(*dstState)); } XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) { XXH64_state_t state; /* using 
a local state to memcpy() in order to avoid strict-aliasing warnings */ memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */ state.v1 = seed + PRIME64_1 + PRIME64_2; state.v2 = seed + PRIME64_2; state.v3 = seed + 0; state.v4 = seed - PRIME64_1; memcpy(statePtr, &state, sizeof(state)); return XXH_OK; } XXH_FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) { const BYTE* p = (const BYTE*)input; const BYTE* const bEnd = p + len; #ifdef XXH_ACCEPT_NULL_INPUT_POINTER if (input==NULL) return XXH_ERROR; #endif state->total_len += len; if (state->memsize + len < 32) { /* fill in tmp buffer */ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); state->memsize += (U32)len; return XXH_OK; } if (state->memsize) { /* tmp buffer is full */ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); p += 32-state->memsize; state->memsize = 0; } if (p+32 <= bEnd) { const BYTE* const limit = bEnd - 32; U64 v1 = state->v1; U64 v2 = state->v2; U64 v3 = state->v3; U64 v4 = state->v4; do { v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; } while (p<=limit); state->v1 = v1; state->v2 = v2; state->v3 = v3; state->v4 = v4; } if (p < bEnd) { XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); state->memsize = (unsigned)(bEnd-p); } return XXH_OK; } XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) { XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH64_update_endian(state_in, input, len, XXH_littleEndian); else return XXH64_update_endian(state_in, input, len, XXH_bigEndian); } XXH_FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) { const BYTE * p = (const BYTE*)state->mem64; const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize; U64 h64; if (state->total_len >= 32) { U64 const v1 = state->v1; U64 const v2 = state->v2; U64 const v3 = state->v3; U64 const v4 = state->v4; h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); h64 = XXH64_mergeRound(h64, v1); h64 = XXH64_mergeRound(h64, v2); h64 = XXH64_mergeRound(h64, v3); h64 = XXH64_mergeRound(h64, v4); } else { h64 = state->v3 + PRIME64_5; } h64 += (U64) state->total_len; while (p+8<=bEnd) { U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian)); h64 ^= k1; h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; p+=8; } if (p+4<=bEnd) { h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1; h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; p+=4; } while (p<bEnd) { h64 ^= (*p) * PRIME64_5; h64 = XXH_rotl64(h64, 11) * PRIME64_1; p++; } h64 ^= h64 >> 33; h64 *= PRIME64_2; h64 ^= h64 >> 29; h64 *= PRIME64_3; h64 ^= h64 >> 32; return h64; } XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) { XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return 
XXH64_digest_endian(state_in, XXH_littleEndian); else return XXH64_digest_endian(state_in, XXH_bigEndian); } /*====== Canonical representation ======*/ XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) { XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); memcpy(dst, &hash, sizeof(*dst)); } XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) { return XXH_readBE64(src); } #endif /* XXH_NO_LONG_LONG */
29,899
894
jart/cosmopolitan
false
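Both the one-shot and streaming entry points defined above compose in the obvious way. A minimal sketch, assuming only the xxhash.h header from this directory (input and seed are arbitrary):

#include "libc/stdio/stdio.h"
#include "libc/str/str.h"
#include "third_party/lz4cli/xxhash.h"

int main(void) {
  const char msg[] = "hello world";
  size_t len = strlen(msg);

  /* One-shot 64-bit hash with seed 0. */
  unsigned long long h64 = XXH64(msg, len, 0);

  /* Streaming 32-bit hash over two chunks; must equal the one-shot. */
  XXH32_state_t *st = XXH32_createState();
  XXH32_reset(st, 0);
  XXH32_update(st, msg, 5);
  XXH32_update(st, msg + 5, len - 5);
  unsigned h32 = XXH32_digest(st);
  XXH32_freeState(st);

  printf("XXH64=%016llx XXH32=%08x (one-shot: %08x)\n",
         h64, h32, XXH32(msg, len, 0));
  return 0;
}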
cosmopolitan/third_party/lz4cli/bench.c
/* clang-format off */ /* bench.c - Demo program to benchmark open-source compression algorithms Copyright (C) Yann Collet 2012-2016 GPL v2 License This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. You can contact the author at : - LZ4 homepage : http://www.lz4.org - LZ4 source repository : https://github.com/lz4/lz4 */ /*-************************************ * Compiler options **************************************/ #ifdef _MSC_VER /* Visual Studio */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ #endif /* ************************************* * Includes ***************************************/ #include "third_party/lz4cli/platform.h" /* Compiler options */ #include "third_party/lz4cli/util.h" /* UTIL_GetFileSize, UTIL_sleep */ #include "libc/mem/mem.h" /* malloc, free */ #include "libc/str/str.h" /* memset */ #include "libc/stdio/stdio.h" /* fprintf, fopen, ftello */ #include "libc/time/time.h" /* clock_t, clock, CLOCKS_PER_SEC */ #include "libc/assert.h" #include "libc/runtime/runtime.h" /* assert */ #include "third_party/lz4cli/datagen.h" /* RDG_genBuffer */ #include "third_party/lz4cli/xxhash.h" #include "third_party/lz4cli/lz4.h" #define COMPRESSOR0 LZ4_compress_local static int LZ4_compress_local(const char* src, char* dst, int srcSize, int dstSize, int clevel) { int const acceleration = (clevel < 0) ? -clevel + 1 : 1; return LZ4_compress_fast(src, dst, srcSize, dstSize, acceleration); } #include "third_party/lz4cli/lz4hc.h" #define COMPRESSOR1 LZ4_compress_HC #define DEFAULTCOMPRESSOR COMPRESSOR0 #define LZ4_isError(errcode) (errcode==0) /* ************************************* * Constants ***************************************/ #ifndef LZ4_GIT_COMMIT_STRING # define LZ4_GIT_COMMIT_STRING "" #else # define LZ4_GIT_COMMIT_STRING LZ4_EXPAND_AND_QUOTE(LZ4_GIT_COMMIT) #endif #define NBSECONDS 3 #define TIMELOOP_MICROSEC 1*1000000ULL /* 1 second */ #define TIMELOOP_NANOSEC 1*1000000000ULL /* 1 second */ #define ACTIVEPERIOD_MICROSEC 70*1000000ULL /* 70 seconds */ #define COOLPERIOD_SEC 10 #define DECOMP_MULT 1 /* test decompression DECOMP_MULT times longer than compression */ #define KB *(1 <<10) #define MB *(1 <<20) #define GB *(1U<<30) static const size_t maxMemory = (sizeof(size_t)==4) ? (2 GB - 64 MB) : (size_t)(1ULL << ((sizeof(size_t)*8)-31)); static U32 g_compressibilityDefault = 50; /* ************************************* * console display ***************************************/ #define DISPLAY(...) fprintf(stderr, __VA_ARGS__) #define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); } static U32 g_displayLevel = 2; /* 0 : no display; 1: errors; 2 : + result + interaction + warnings; 3 : + progression; 4 : + information */ #define DISPLAYUPDATE(l, ...) 
if (g_displayLevel>=l) { \ if ((clock() - g_time > refreshRate) || (g_displayLevel>=4)) \ { g_time = clock(); DISPLAY(__VA_ARGS__); \ if (g_displayLevel>=4) fflush(stdout); } } static clock_t g_time = 0; static clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; /* ************************************* * Exceptions ***************************************/ #ifndef DEBUG # define DEBUG 0 #endif #define DEBUGOUTPUT(...) if (DEBUG) DISPLAY(__VA_ARGS__); #define EXM_THROW(error, ...) \ { \ DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \ DISPLAYLEVEL(1, "Error %i : ", error); \ DISPLAYLEVEL(1, __VA_ARGS__); \ DISPLAYLEVEL(1, "\n"); \ exit(error); \ } /* ************************************* * Benchmark Parameters ***************************************/ static U32 g_nbSeconds = NBSECONDS; static size_t g_blockSize = 0; int g_additionalParam = 0; int g_benchSeparately = 0; void BMK_setNotificationLevel(unsigned level) { g_displayLevel=level; } void BMK_setAdditionalParam(int additionalParam) { g_additionalParam=additionalParam; } void BMK_setNbSeconds(unsigned nbSeconds) { g_nbSeconds = nbSeconds; DISPLAYLEVEL(3, "- test >= %u seconds per compression / decompression -\n", g_nbSeconds); } void BMK_setBlockSize(size_t blockSize) { g_blockSize = blockSize; } void BMK_setBenchSeparately(int separate) { g_benchSeparately = (separate!=0); } /* ******************************************************** * Bench functions **********************************************************/ typedef struct { const char* srcPtr; size_t srcSize; char* cPtr; size_t cRoom; size_t cSize; char* resPtr; size_t resSize; } blockParam_t; struct compressionParameters { int (*compressionFunction)(const char* src, char* dst, int srcSize, int dstSize, int cLevel); }; #ifndef MIN #define MIN(a,b) ((a)<(b) ? (a) : (b)) #define MAX(a,b) ((a)>(b) ? (a) : (b)) #endif static int BMK_benchMem(const void* srcBuffer, size_t srcSize, const char* displayName, int cLevel, const size_t* fileSizes, U32 nbFiles) { size_t const blockSize = (g_blockSize>=32 ? 
g_blockSize : srcSize) + (!srcSize) /* avoid div by 0 */ ;
    U32 const maxNbBlocks = (U32) ((srcSize + (blockSize-1)) / blockSize) + nbFiles;
    blockParam_t* const blockTable = (blockParam_t*) malloc(maxNbBlocks * sizeof(blockParam_t));
    size_t const maxCompressedSize = LZ4_compressBound((int)srcSize) + (maxNbBlocks * 1024);   /* add some room for safety */
    void* const compressedBuffer = malloc(maxCompressedSize);
    void* const resultBuffer = malloc(srcSize);
    U32 nbBlocks;
    struct compressionParameters compP;
    int cfunctionId;

    /* checks */
    if (!compressedBuffer || !resultBuffer || !blockTable)
        EXM_THROW(31, "allocation error : not enough memory");

    /* init */
    if (strlen(displayName)>17) displayName += strlen(displayName)-17;   /* can only display 17 characters */

    /* Init */
    if (cLevel < LZ4HC_CLEVEL_MIN) cfunctionId = 0; else cfunctionId = 1;
    switch (cfunctionId)
    {
#ifdef COMPRESSOR0
    case 0 : compP.compressionFunction = COMPRESSOR0; break;
#endif
#ifdef COMPRESSOR1
    case 1 : compP.compressionFunction = COMPRESSOR1; break;
#endif
    default : compP.compressionFunction = DEFAULTCOMPRESSOR;
    }

    /* Init blockTable data */
    {   const char* srcPtr = (const char*)srcBuffer;
        char* cPtr = (char*)compressedBuffer;
        char* resPtr = (char*)resultBuffer;
        U32 fileNb;
        for (nbBlocks=0, fileNb=0; fileNb<nbFiles; fileNb++) {
            size_t remaining = fileSizes[fileNb];
            U32 const nbBlocksforThisFile = (U32)((remaining + (blockSize-1)) / blockSize);
            U32 const blockEnd = nbBlocks + nbBlocksforThisFile;
            for ( ; nbBlocks<blockEnd; nbBlocks++) {
                size_t const thisBlockSize = MIN(remaining, blockSize);
                blockTable[nbBlocks].srcPtr = srcPtr;
                blockTable[nbBlocks].cPtr = cPtr;
                blockTable[nbBlocks].resPtr = resPtr;
                blockTable[nbBlocks].srcSize = thisBlockSize;
                blockTable[nbBlocks].cRoom = LZ4_compressBound((int)thisBlockSize);
                srcPtr += thisBlockSize;
                cPtr += blockTable[nbBlocks].cRoom;
                resPtr += thisBlockSize;
                remaining -= thisBlockSize;
    }   }   }

    /* warming up memory */
    RDG_genBuffer(compressedBuffer, maxCompressedSize, 0.10, 0.50, 1);

    /* Bench */
    {   U64 fastestC = (U64)(-1LL), fastestD = (U64)(-1LL);
        U64 const crcOrig = XXH64(srcBuffer, srcSize, 0);
        UTIL_time_t coolTime;
        U64 const maxTime = (g_nbSeconds * TIMELOOP_NANOSEC) + 100;
        U32 nbCompressionLoops = (U32)((5 MB) / (srcSize+1)) + 1;  /* conservative initial compression speed estimate */
        U32 nbDecodeLoops = (U32)((200 MB) / (srcSize+1)) + 1;  /* conservative initial decode speed estimate */
        U64 totalCTime=0, totalDTime=0;
        U32 cCompleted=0, dCompleted=0;
# define NB_MARKS 4
        const char* const marks[NB_MARKS] = { " |", " /", " =", "\\" };
        U32 markNb = 0;
        size_t cSize = 0;
        double ratio = 0.;

        coolTime = UTIL_getTime();
        DISPLAYLEVEL(2, "\r%79s\r", "");
        while (!cCompleted || !dCompleted) {
            /* overheat protection */
            if (UTIL_clockSpanMicro(coolTime) > ACTIVEPERIOD_MICROSEC) {
                DISPLAYLEVEL(2, "\rcooling down ...
\r"); UTIL_sleep(COOLPERIOD_SEC); coolTime = UTIL_getTime(); } /* Compression */ DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (U32)srcSize); if (!cCompleted) memset(compressedBuffer, 0xE5, maxCompressedSize); /* warm up and erase result buffer */ UTIL_sleepMilli(1); /* give processor time to other processes */ UTIL_waitForNextTick(); if (!cCompleted) { /* still some time to do compression tests */ UTIL_time_t const clockStart = UTIL_getTime(); U32 nbLoops; for (nbLoops=0; nbLoops < nbCompressionLoops; nbLoops++) { U32 blockNb; for (blockNb=0; blockNb<nbBlocks; blockNb++) { size_t const rSize = compP.compressionFunction(blockTable[blockNb].srcPtr, blockTable[blockNb].cPtr, (int)blockTable[blockNb].srcSize, (int)blockTable[blockNb].cRoom, cLevel); if (LZ4_isError(rSize)) EXM_THROW(1, "LZ4_compress() failed"); blockTable[blockNb].cSize = rSize; } } { U64 const clockSpan = UTIL_clockSpanNano(clockStart); if (clockSpan > 0) { if (clockSpan < fastestC * nbCompressionLoops) fastestC = clockSpan / nbCompressionLoops; assert(fastestC > 0); nbCompressionLoops = (U32)(TIMELOOP_NANOSEC / fastestC) + 1; /* aim for ~1sec */ } else { assert(nbCompressionLoops < 40000000); /* avoid overflow */ nbCompressionLoops *= 100; } totalCTime += clockSpan; cCompleted = totalCTime>maxTime; } } cSize = 0; { U32 blockNb; for (blockNb=0; blockNb<nbBlocks; blockNb++) cSize += blockTable[blockNb].cSize; } cSize += !cSize; /* avoid div by 0 */ ratio = (double)srcSize / (double)cSize; markNb = (markNb+1) % NB_MARKS; DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s\r", marks[markNb], displayName, (U32)srcSize, (U32)cSize, ratio, ((double)srcSize / fastestC) * 1000 ); (void)fastestD; (void)crcOrig; /* unused when decompression disabled */ #if 1 /* Decompression */ if (!dCompleted) memset(resultBuffer, 0xD6, srcSize); /* warm result buffer */ UTIL_sleepMilli(5); /* give processor time to other processes */ UTIL_waitForNextTick(); if (!dCompleted) { UTIL_time_t const clockStart = UTIL_getTime(); U32 nbLoops; for (nbLoops=0; nbLoops < nbDecodeLoops; nbLoops++) { U32 blockNb; for (blockNb=0; blockNb<nbBlocks; blockNb++) { size_t const regenSize = LZ4_decompress_safe(blockTable[blockNb].cPtr, blockTable[blockNb].resPtr, (int)blockTable[blockNb].cSize, (int)blockTable[blockNb].srcSize); if (LZ4_isError(regenSize)) { DISPLAY("LZ4_decompress_safe() failed on block %u \n", blockNb); break; } blockTable[blockNb].resSize = regenSize; } } { U64 const clockSpan = UTIL_clockSpanNano(clockStart); if (clockSpan > 0) { if (clockSpan < fastestD * nbDecodeLoops) fastestD = clockSpan / nbDecodeLoops; assert(fastestD > 0); nbDecodeLoops = (U32)(TIMELOOP_NANOSEC / fastestD) + 1; /* aim for ~1sec */ } else { assert(nbDecodeLoops < 40000000); /* avoid overflow */ nbDecodeLoops *= 100; } totalDTime += clockSpan; dCompleted = totalDTime > (DECOMP_MULT*maxTime); } } markNb = (markNb+1) % NB_MARKS; DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s ,%6.1f MB/s\r", marks[markNb], displayName, (U32)srcSize, (U32)cSize, ratio, ((double)srcSize / fastestC) * 1000, ((double)srcSize / fastestD) * 1000); /* CRC Checking */ { U64 const crcCheck = XXH64(resultBuffer, srcSize, 0); if (crcOrig!=crcCheck) { size_t u; DISPLAY("\n!!! WARNING !!! 
%17s : Invalid Checksum : %x != %x \n", displayName, (unsigned)crcOrig, (unsigned)crcCheck); for (u=0; u<srcSize; u++) { if (((const BYTE*)srcBuffer)[u] != ((const BYTE*)resultBuffer)[u]) { U32 segNb, bNb, pos; size_t bacc = 0; DISPLAY("Decoding error at pos %u ", (U32)u); for (segNb = 0; segNb < nbBlocks; segNb++) { if (bacc + blockTable[segNb].srcSize > u) break; bacc += blockTable[segNb].srcSize; } pos = (U32)(u - bacc); bNb = pos / (128 KB); DISPLAY("(block %u, sub %u, pos %u) \n", segNb, bNb, pos); break; } if (u==srcSize-1) { /* should never happen */ DISPLAY("no difference detected\n"); } } break; } } /* CRC Checking */ #endif } /* for (testNb = 1; testNb <= (g_nbSeconds + !g_nbSeconds); testNb++) */ if (g_displayLevel == 1) { double const cSpeed = ((double)srcSize / fastestC) * 1000; double const dSpeed = ((double)srcSize / fastestD) * 1000; if (g_additionalParam) DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s (param=%d)\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName, g_additionalParam); else DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName); } DISPLAYLEVEL(2, "%2i#\n", cLevel); } /* Bench */ /* clean up */ free(blockTable); free(compressedBuffer); free(resultBuffer); return 0; } static size_t BMK_findMaxMem(U64 requiredMem) { size_t step = 64 MB; BYTE* testmem=NULL; requiredMem = (((requiredMem >> 26) + 1) << 26); requiredMem += 2*step; if (requiredMem > maxMemory) requiredMem = maxMemory; while (!testmem) { if (requiredMem > step) requiredMem -= step; else requiredMem >>= 1; testmem = (BYTE*) malloc ((size_t)requiredMem); } free (testmem); /* keep some space available */ if (requiredMem > step) requiredMem -= step; else requiredMem >>= 1; return (size_t)requiredMem; } static void BMK_benchCLevel(void* srcBuffer, size_t benchedSize, const char* displayName, int cLevel, int cLevelLast, const size_t* fileSizes, unsigned nbFiles) { int l; const char* pch = strrchr(displayName, '\\'); /* Windows */ if (!pch) pch = strrchr(displayName, '/'); /* Linux */ if (pch) displayName = pch+1; SET_REALTIME_PRIORITY; if (g_displayLevel == 1 && !g_additionalParam) DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n", LZ4_VERSION_STRING, LZ4_GIT_COMMIT_STRING, (U32)benchedSize, g_nbSeconds, (U32)(g_blockSize>>10)); if (cLevelLast < cLevel) cLevelLast = cLevel; for (l=cLevel; l <= cLevelLast; l++) { BMK_benchMem(srcBuffer, benchedSize, displayName, l, fileSizes, nbFiles); } } /*! BMK_loadFiles() : Loads `buffer` with content of files listed within `fileNamesTable`. At most, fills `buffer` entirely */ static void BMK_loadFiles(void* buffer, size_t bufferSize, size_t* fileSizes, const char** fileNamesTable, unsigned nbFiles) { size_t pos = 0, totalSize = 0; unsigned n; for (n=0; n<nbFiles; n++) { FILE* f; U64 fileSize = UTIL_getFileSize(fileNamesTable[n]); if (UTIL_isDirectory(fileNamesTable[n])) { DISPLAYLEVEL(2, "Ignoring %s directory... \n", fileNamesTable[n]); fileSizes[n] = 0; continue; } f = fopen(fileNamesTable[n], "rb"); if (f==NULL) EXM_THROW(10, "impossible to open file %s", fileNamesTable[n]); DISPLAYUPDATE(2, "Loading %s... 
\r", fileNamesTable[n]); if (fileSize > bufferSize-pos) { /* buffer too small - stop after this file */ fileSize = bufferSize-pos; nbFiles=n; } { size_t const readSize = fread(((char*)buffer)+pos, 1, (size_t)fileSize, f); if (readSize != (size_t)fileSize) EXM_THROW(11, "could not read %s", fileNamesTable[n]); pos += readSize; } fileSizes[n] = (size_t)fileSize; totalSize += (size_t)fileSize; fclose(f); } if (totalSize == 0) EXM_THROW(12, "no data to bench"); } static void BMK_benchFileTable(const char** fileNamesTable, unsigned nbFiles, int cLevel, int cLevelLast) { void* srcBuffer; size_t benchedSize; size_t* fileSizes = (size_t*)malloc(nbFiles * sizeof(size_t)); U64 const totalSizeToLoad = UTIL_getTotalFileSize(fileNamesTable, nbFiles); char mfName[20] = {0}; if (!fileSizes) EXM_THROW(12, "not enough memory for fileSizes"); /* Memory allocation & restrictions */ benchedSize = BMK_findMaxMem(totalSizeToLoad * 3) / 3; if (benchedSize==0) EXM_THROW(12, "not enough memory"); if ((U64)benchedSize > totalSizeToLoad) benchedSize = (size_t)totalSizeToLoad; if (benchedSize > LZ4_MAX_INPUT_SIZE) { benchedSize = LZ4_MAX_INPUT_SIZE; DISPLAY("File(s) bigger than LZ4's max input size; testing %u MB only...\n", (U32)(benchedSize >> 20)); } else { if (benchedSize < totalSizeToLoad) DISPLAY("Not enough memory; testing %u MB only...\n", (U32)(benchedSize >> 20)); } srcBuffer = malloc(benchedSize + !benchedSize); /* avoid alloc of zero */ if (!srcBuffer) EXM_THROW(12, "not enough memory"); /* Load input buffer */ BMK_loadFiles(srcBuffer, benchedSize, fileSizes, fileNamesTable, nbFiles); /* Bench */ snprintf (mfName, sizeof(mfName), " %u files", nbFiles); { const char* displayName = (nbFiles > 1) ? mfName : fileNamesTable[0]; BMK_benchCLevel(srcBuffer, benchedSize, displayName, cLevel, cLevelLast, fileSizes, nbFiles); } /* clean up */ free(srcBuffer); free(fileSizes); } static void BMK_syntheticTest(int cLevel, int cLevelLast, double compressibility) { char name[20] = {0}; size_t benchedSize = 10000000; void* const srcBuffer = malloc(benchedSize); /* Memory allocation */ if (!srcBuffer) EXM_THROW(21, "not enough memory"); /* Fill input buffer */ RDG_genBuffer(srcBuffer, benchedSize, compressibility, 0.0, 0); /* Bench */ snprintf (name, sizeof(name), "Synthetic %2u%%", (unsigned)(compressibility*100)); BMK_benchCLevel(srcBuffer, benchedSize, name, cLevel, cLevelLast, &benchedSize, 1); /* clean up */ free(srcBuffer); } int BMK_benchFilesSeparately(const char** fileNamesTable, unsigned nbFiles, int cLevel, int cLevelLast) { unsigned fileNb; if (cLevel > LZ4HC_CLEVEL_MAX) cLevel = LZ4HC_CLEVEL_MAX; if (cLevelLast > LZ4HC_CLEVEL_MAX) cLevelLast = LZ4HC_CLEVEL_MAX; if (cLevelLast < cLevel) cLevelLast = cLevel; if (cLevelLast > cLevel) DISPLAYLEVEL(2, "Benchmarking levels from %d to %d\n", cLevel, cLevelLast); for (fileNb=0; fileNb<nbFiles; fileNb++) BMK_benchFileTable(fileNamesTable+fileNb, 1, cLevel, cLevelLast); return 0; } int BMK_benchFiles(const char** fileNamesTable, unsigned nbFiles, int cLevel, int cLevelLast) { double const compressibility = (double)g_compressibilityDefault / 100; if (cLevel > LZ4HC_CLEVEL_MAX) cLevel = LZ4HC_CLEVEL_MAX; if (cLevelLast > LZ4HC_CLEVEL_MAX) cLevelLast = LZ4HC_CLEVEL_MAX; if (cLevelLast < cLevel) cLevelLast = cLevel; if (cLevelLast > cLevel) DISPLAYLEVEL(2, "Benchmarking levels from %d to %d\n", cLevel, cLevelLast); if (nbFiles == 0) BMK_syntheticTest(cLevel, cLevelLast, compressibility); else { if (g_benchSeparately) BMK_benchFilesSeparately(fileNamesTable, nbFiles, cLevel, 
cLevelLast); else BMK_benchFileTable(fileNamesTable, nbFiles, cLevel, cLevelLast); } return 0; }
22,700
561
jart/cosmopolitan
false
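For illustration, a minimal driver for the benchmark module above could look like the sketch below. It is not part of the lz4 sources; it assumes only the BMK_* entry points that bench.c defines (BMK_benchFiles) and that the CLI uses elsewhere in this tree (BMK_setNotificationLevel, declared in third_party/lz4cli/bench.h). Passing zero files falls through to the synthetic generator, exactly as BMK_benchFiles() dispatches above.

/* Hypothetical benchmark driver (sketch, assumes third_party/lz4cli/bench.h
 * declares the BMK_* functions used by the lz4 CLI). */
#include "third_party/lz4cli/bench.h"

int main(int argc, const char** argv) {
    BMK_setNotificationLevel(2);  /* normal verbosity */
    /* nbFiles == 0 makes BMK_benchFiles() run BMK_syntheticTest() */
    return BMK_benchFiles(argv + 1, (unsigned)(argc - 1), 1, 9);
}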
cosmopolitan/third_party/lz4cli/util.h
/* clang-format off */ /* util.h - utility functions Copyright (C) 2016-present, Przemyslaw Skibinski, Yann Collet This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef UTIL_H_MODULE #define UTIL_H_MODULE #if defined (__cplusplus) extern "C" { #endif /*-**************************************** * Dependencies ******************************************/ #include "third_party/lz4cli/platform.h" #include "libc/mem/mem.h" #include "libc/str/str.h" #include "libc/stdio/stdio.h" #include "libc/calls/calls.h" #include "libc/time/time.h" #include "libc/errno.h" #include "libc/fmt/fmt.h" #include "libc/time/struct/utimbuf.h" #include "libc/calls/struct/stat.h" #include "libc/calls/struct/dirent.h" #include "libc/sysv/consts/s.h" #include "libc/calls/weirdtypes.h" /*-************************************************************** * Basic Types *****************************************************************/ #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; typedef uint32_t U32; typedef int32_t S32; typedef uint64_t U64; typedef int64_t S64; #else typedef unsigned char BYTE; typedef unsigned short U16; typedef signed short S16; typedef unsigned int U32; typedef signed int S32; typedef unsigned long long U64; typedef signed long long S64; #endif /* ************************************************************ * Avoid fseek()'s 2GiB barrier with MSVC, MacOS, *BSD, MinGW ***************************************************************/ #if defined(_MSC_VER) && (_MSC_VER >= 1400) # define UTIL_fseek _fseeki64 #elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */ # define UTIL_fseek fseeko #elif defined(__MINGW32__) && defined(__MSVCRT__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) # define UTIL_fseek fseeko64 #else # define UTIL_fseek fseek #endif /*-**************************************** * Sleep functions: Windows - Posix - others ******************************************/ #if defined(_WIN32) # include <windows.h> # define SET_REALTIME_PRIORITY SetPriorityClass(GetCurrentProcess(), REALTIME_PRIORITY_CLASS) # define UTIL_sleep(s) Sleep(1000*s) # define UTIL_sleepMilli(milli) Sleep(milli) #elif PLATFORM_POSIX_VERSION >= 0 /* Unix-like operating system */ # include "libc/calls/calls.h" # if defined(PRIO_PROCESS) # define SET_REALTIME_PRIORITY setpriority(PRIO_PROCESS, 0, -20) # else # define SET_REALTIME_PRIORITY /* disabled */ # endif # define UTIL_sleep(s) sleep(s) # if (defined(__linux__) && (PLATFORM_POSIX_VERSION >= 199309L)) || (PLATFORM_POSIX_VERSION >= 200112L) /* nanosleep requires POSIX.1-2001 */ # define UTIL_sleepMilli(milli) { struct timespec t; t.tv_sec=0; t.tv_nsec=milli*1000000ULL; nanosleep(&t, NULL); } # else # define UTIL_sleepMilli(milli) /* disabled */ # 
endif #else # define SET_REALTIME_PRIORITY /* disabled */ # define UTIL_sleep(s) /* disabled */ # define UTIL_sleepMilli(milli) /* disabled */ #endif /* ************************************* * Constants ***************************************/ #define LIST_SIZE_INCREASE (8*1024) /*-**************************************** * Compiler specifics ******************************************/ #if defined(__INTEL_COMPILER) # pragma warning(disable : 177) /* disable: message #177: function was declared but never referenced, useful with UTIL_STATIC */ #endif #if defined(__GNUC__) # define UTIL_STATIC static __attribute__((unused)) #elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define UTIL_STATIC static inline #elif defined(_MSC_VER) # define UTIL_STATIC static __inline #else # define UTIL_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ #endif /*-**************************************** * Time functions ******************************************/ #if defined(_WIN32) /* Windows */ typedef LARGE_INTEGER UTIL_time_t; UTIL_STATIC UTIL_time_t UTIL_getTime(void) { UTIL_time_t x; QueryPerformanceCounter(&x); return x; } UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) { static LARGE_INTEGER ticksPerSecond; static int init = 0; if (!init) { if (!QueryPerformanceFrequency(&ticksPerSecond)) fprintf(stderr, "ERROR: QueryPerformanceFrequency() failure\n"); init = 1; } return 1000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart; } UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) { static LARGE_INTEGER ticksPerSecond; static int init = 0; if (!init) { if (!QueryPerformanceFrequency(&ticksPerSecond)) fprintf(stderr, "ERROR: QueryPerformanceFrequency() failure\n"); init = 1; } return 1000000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart; } #elif defined(__APPLE__) && defined(__MACH__) #include <mach/mach_time.h> typedef U64 UTIL_time_t; UTIL_STATIC UTIL_time_t UTIL_getTime(void) { return mach_absolute_time(); } UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) { static mach_timebase_info_data_t rate; static int init = 0; if (!init) { mach_timebase_info(&rate); init = 1; } return (((clockEnd - clockStart) * (U64)rate.numer) / ((U64)rate.denom)) / 1000ULL; } UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) { static mach_timebase_info_data_t rate; static int init = 0; if (!init) { mach_timebase_info(&rate); init = 1; } return ((clockEnd - clockStart) * (U64)rate.numer) / ((U64)rate.denom); } #elif (PLATFORM_POSIX_VERSION >= 200112L) && (defined __UCLIBC__ || (defined(__GLIBC__) && ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17) || __GLIBC__ > 2) ) ) #include "libc/time/time.h" typedef struct timespec UTIL_time_t; UTIL_STATIC UTIL_time_t UTIL_getTime(void) { UTIL_time_t now; if (clock_gettime(CLOCK_MONOTONIC, &now)) fprintf(stderr, "ERROR: Failed to get time\n"); /* we could also exit() */ return now; } UTIL_STATIC UTIL_time_t UTIL_getSpanTime(UTIL_time_t begin, UTIL_time_t end) { UTIL_time_t diff; if (end.tv_nsec < begin.tv_nsec) { diff.tv_sec = (end.tv_sec - 1) - begin.tv_sec; diff.tv_nsec = (end.tv_nsec + 1000000000ULL) - begin.tv_nsec; } else { diff.tv_sec = end.tv_sec - begin.tv_sec; diff.tv_nsec = end.tv_nsec - begin.tv_nsec; } return diff; } UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_time_t begin, UTIL_time_t end) 
{ UTIL_time_t const diff = UTIL_getSpanTime(begin, end); U64 micro = 0; micro += 1000000ULL * diff.tv_sec; micro += diff.tv_nsec / 1000ULL; return micro; } UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t begin, UTIL_time_t end) { UTIL_time_t const diff = UTIL_getSpanTime(begin, end); U64 nano = 0; nano += 1000000000ULL * diff.tv_sec; nano += diff.tv_nsec; return nano; } #else /* relies on standard C (note : clock_t measurements can be wrong when using multi-threading) */ typedef clock_t UTIL_time_t; UTIL_STATIC UTIL_time_t UTIL_getTime(void) { return clock(); } UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; } UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; } #endif /* returns time span in microseconds */ UTIL_STATIC U64 UTIL_clockSpanMicro(UTIL_time_t clockStart) { UTIL_time_t const clockEnd = UTIL_getTime(); return UTIL_getSpanTimeMicro(clockStart, clockEnd); } /* returns time span in nanoseconds */ UTIL_STATIC U64 UTIL_clockSpanNano(UTIL_time_t clockStart) { UTIL_time_t const clockEnd = UTIL_getTime(); return UTIL_getSpanTimeNano(clockStart, clockEnd); } UTIL_STATIC void UTIL_waitForNextTick(void) { UTIL_time_t const clockStart = UTIL_getTime(); UTIL_time_t clockEnd; do { clockEnd = UTIL_getTime(); } while (UTIL_getSpanTimeNano(clockStart, clockEnd) == 0); } /*-**************************************** * File functions ******************************************/ UTIL_STATIC int UTIL_isRegFile(const char* infilename); UTIL_STATIC int UTIL_setFileStat(const char *filename, struct stat *statbuf) { int res = 0; struct utimbuf timebuf; if (!UTIL_isRegFile(filename)) return -1; timebuf.actime = time(NULL); timebuf.modtime = statbuf->st_mtim.tv_sec; res += utime(filename, &timebuf); /* set access and modification times */ #if !defined(_WIN32) res += chown(filename, statbuf->st_uid, statbuf->st_gid); /* Copy ownership */ #endif res += chmod(filename, statbuf->st_mode & 07777); /* Copy file permissions */ errno = 0; return -res; /* number of errors is returned */ } UTIL_STATIC int UTIL_getFileStat(const char* infilename, struct stat *statbuf) { int r; #if defined(_MSC_VER) r = _stat64(infilename, statbuf); if (r || !(statbuf->st_mode & S_IFREG)) return 0; /* No good... */ #else r = stat(infilename, statbuf); if (r || !S_ISREG(statbuf->st_mode)) return 0; /* No good... */ #endif return 1; } UTIL_STATIC int UTIL_isRegFile(const char* infilename) { struct stat statbuf; return UTIL_getFileStat(infilename, &statbuf); /* Only need to know whether it is a regular file */ } UTIL_STATIC U32 UTIL_isDirectory(const char* infilename) { int r; struct stat statbuf; #if defined(_MSC_VER) r = _stat64(infilename, &statbuf); if (!r && (statbuf.st_mode & _S_IFDIR)) return 1; #else r = stat(infilename, &statbuf); if (!r && S_ISDIR(statbuf.st_mode)) return 1; #endif return 0; } UTIL_STATIC U64 UTIL_getFileSize(const char* infilename) { int r; #if defined(_MSC_VER) struct __stat64 statbuf; r = _stat64(infilename, &statbuf); if (r || !(statbuf.st_mode & S_IFREG)) return 0; /* No good... */ #elif defined(__MINGW32__) && defined (__MSVCRT__) struct _stati64 statbuf; r = _stati64(infilename, &statbuf); if (r || !(statbuf.st_mode & S_IFREG)) return 0; /* No good... */ #else struct stat statbuf; r = stat(infilename, &statbuf); if (r || !S_ISREG(statbuf.st_mode)) return 0; /* No good... 
*/ #endif return (U64)statbuf.st_size; } UTIL_STATIC U64 UTIL_getTotalFileSize(const char** fileNamesTable, unsigned nbFiles) { U64 total = 0; unsigned n; for (n=0; n<nbFiles; n++) total += UTIL_getFileSize(fileNamesTable[n]); return total; } /* * A modified version of realloc(). * If UTIL_realloc() fails the original block is freed. */ UTIL_STATIC void *UTIL_realloc(void *ptr, size_t size) { void *newptr = realloc(ptr, size); if (newptr) return newptr; free(ptr); return NULL; } #ifdef _WIN32 # define UTIL_HAS_CREATEFILELIST UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char** bufEnd) { char* path; int dirLength, fnameLength, pathLength, nbFiles = 0; WIN32_FIND_DATAA cFile; HANDLE hFile; dirLength = (int)strlen(dirName); path = (char*) malloc(dirLength + 3); if (!path) return 0; memcpy(path, dirName, dirLength); path[dirLength] = '\\'; path[dirLength+1] = '*'; path[dirLength+2] = 0; hFile=FindFirstFileA(path, &cFile); if (hFile == INVALID_HANDLE_VALUE) { fprintf(stderr, "Cannot open directory '%s'\n", dirName); return 0; } free(path); do { fnameLength = (int)strlen(cFile.cFileName); path = (char*) malloc(dirLength + fnameLength + 2); if (!path) { FindClose(hFile); return 0; } memcpy(path, dirName, dirLength); path[dirLength] = '\\'; memcpy(path+dirLength+1, cFile.cFileName, fnameLength); pathLength = dirLength+1+fnameLength; path[pathLength] = 0; if (cFile.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) { if (strcmp (cFile.cFileName, "..") == 0 || strcmp (cFile.cFileName, ".") == 0) continue; nbFiles += UTIL_prepareFileList(path, bufStart, pos, bufEnd); /* Recursively call "UTIL_prepareFileList" with the new path. */ if (*bufStart == NULL) { free(path); FindClose(hFile); return 0; } } else if ((cFile.dwFileAttributes & FILE_ATTRIBUTE_NORMAL) || (cFile.dwFileAttributes & FILE_ATTRIBUTE_ARCHIVE) || (cFile.dwFileAttributes & FILE_ATTRIBUTE_COMPRESSED)) { if (*bufStart + *pos + pathLength >= *bufEnd) { ptrdiff_t newListSize = (*bufEnd - *bufStart) + LIST_SIZE_INCREASE; *bufStart = (char*)UTIL_realloc(*bufStart, newListSize); *bufEnd = *bufStart + newListSize; if (*bufStart == NULL) { free(path); FindClose(hFile); return 0; } } if (*bufStart + *pos + pathLength < *bufEnd) { strncpy(*bufStart + *pos, path, *bufEnd - (*bufStart + *pos)); *pos += pathLength + 1; nbFiles++; } } free(path); } while (FindNextFileA(hFile, &cFile)); FindClose(hFile); return nbFiles; } #elif defined(__linux__) || (PLATFORM_POSIX_VERSION >= 200112L) /* opendir, readdir require POSIX.1-2001 */ # define UTIL_HAS_CREATEFILELIST # include "libc/calls/calls.h" # include "libc/str/str.h" UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char** bufEnd) { DIR *dir; struct dirent *entry; char* path; int dirLength, fnameLength, pathLength, nbFiles = 0; if (!(dir = opendir(dirName))) { fprintf(stderr, "Cannot open directory '%s': %s\n", dirName, strerror(errno)); return 0; } dirLength = (int)strlen(dirName); errno = 0; while ((entry = readdir(dir)) != NULL) { if (strcmp (entry->d_name, "..") == 0 || strcmp (entry->d_name, ".") == 0) continue; fnameLength = (int)strlen(&entry->d_name[0]); path = (char*) malloc(dirLength + fnameLength + 2); if (!path) { closedir(dir); return 0; } memcpy(path, dirName, dirLength); path[dirLength] = '/'; memcpy(path+dirLength+1, entry->d_name, fnameLength); pathLength = dirLength+1+fnameLength; path[pathLength] = 0; if (UTIL_isDirectory(path)) { nbFiles += UTIL_prepareFileList(path, bufStart, pos, bufEnd); /* Recursively call 
"UTIL_prepareFileList" with the new path. */ if (*bufStart == NULL) { free(path); closedir(dir); return 0; } } else { if (*bufStart + *pos + pathLength >= *bufEnd) { ptrdiff_t newListSize = (*bufEnd - *bufStart) + LIST_SIZE_INCREASE; *bufStart = (char*)UTIL_realloc(*bufStart, newListSize); *bufEnd = *bufStart + newListSize; if (*bufStart == NULL) { free(path); closedir(dir); return 0; } } if (*bufStart + *pos + pathLength < *bufEnd) { strncpy(*bufStart + *pos, path, *bufEnd - (*bufStart + *pos)); *pos += pathLength + 1; nbFiles++; } } free(path); errno = 0; /* clear errno after UTIL_isDirectory, UTIL_prepareFileList */ } if (errno != 0) { fprintf(stderr, "readdir(%s) error: %s\n", dirName, strerror(errno)); free(*bufStart); *bufStart = NULL; } closedir(dir); return nbFiles; } #else UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char** bufEnd) { (void)bufStart; (void)bufEnd; (void)pos; fprintf(stderr, "Directory %s ignored (compiled without _WIN32 or _POSIX_C_SOURCE)\n", dirName); return 0; } #endif /* #ifdef _WIN32 */ /* * UTIL_createFileList - takes a list of files and directories (params: inputNames, inputNamesNb), scans directories, * and returns a new list of files (params: return value, allocatedBuffer, allocatedNamesNb). * After finishing usage of the list the structures should be freed with UTIL_freeFileList(params: return value, allocatedBuffer) * In case of error UTIL_createFileList returns NULL and UTIL_freeFileList should not be called. */ UTIL_STATIC const char** UTIL_createFileList(const char **inputNames, unsigned inputNamesNb, char** allocatedBuffer, unsigned* allocatedNamesNb) { size_t pos; unsigned i, nbFiles; char* buf = (char*)malloc(LIST_SIZE_INCREASE); char* bufend = buf + LIST_SIZE_INCREASE; const char** fileTable; if (!buf) return NULL; for (i=0, pos=0, nbFiles=0; i<inputNamesNb; i++) { if (!UTIL_isDirectory(inputNames[i])) { size_t const len = strlen(inputNames[i]); if (buf + pos + len >= bufend) { ptrdiff_t newListSize = (bufend - buf) + LIST_SIZE_INCREASE; buf = (char*)UTIL_realloc(buf, newListSize); bufend = buf + newListSize; if (!buf) return NULL; } if (buf + pos + len < bufend) { strncpy(buf + pos, inputNames[i], bufend - (buf + pos)); pos += len + 1; nbFiles++; } } else { nbFiles += UTIL_prepareFileList(inputNames[i], &buf, &pos, &bufend); if (buf == NULL) return NULL; } } if (nbFiles == 0) { free(buf); return NULL; } fileTable = (const char**)malloc((nbFiles+1) * sizeof(const char*)); if (!fileTable) { free(buf); return NULL; } for (i=0, pos=0; i<nbFiles; i++) { fileTable[i] = buf + pos; pos += strlen(fileTable[i]) + 1; } if (buf + pos > bufend) { free(buf); free((void*)fileTable); return NULL; } *allocatedBuffer = buf; *allocatedNamesNb = nbFiles; return fileTable; } UTIL_STATIC void UTIL_freeFileList(const char** filenameTable, char* allocatedBuffer) { if (allocatedBuffer) free(allocatedBuffer); if (filenameTable) free((void*)filenameTable); } #if defined (__cplusplus) } #endif #endif /* UTIL_H_MODULE */
19,556
576
jart/cosmopolitan
false
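As a usage sketch for the timing helpers above (illustrative, not from the lz4 sources): probe the granularity of whichever clock backend util.h selected at compile time. Only functions defined in util.h itself are used.

/* Sketch: measure clock granularity with util.h's portable timers. */
#include "third_party/lz4cli/util.h"

int main(void) {
    UTIL_time_t const start = UTIL_getTime();
    UTIL_waitForNextTick();  /* spin until the clock visibly advances */
    printf("clock tick ~ %llu ns\n",
           (unsigned long long)UTIL_clockSpanNano(start));
    return 0;
}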
cosmopolitan/third_party/lz4cli/datagen.h
/* clang-format off */ /* datagen.h - compressible data generator header Copyright (C) Yann Collet 2012-2016 GPL v2 License This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. You can contact the author at : - LZ4 source repository : https://github.com/lz4/lz4 - Public forum : https://groups.google.com/forum/#!forum/lz4c */ void RDG_genOut(unsigned long long size, double matchProba, double litProba, unsigned seed); void RDG_genBuffer(void* buffer, size_t size, double matchProba, double litProba, unsigned seed); /* RDG_genOut Generates 'size' bytes of compressible data into stdout. Compressibility can be controlled using 'matchProba'. 'litProba' is optional, and affects the variability of bytes. If litProba==0.0, a default value is used. Generated data can be selected using 'seed'. If (matchProba, litProba and seed) are equal, the function always generates the same content. RDG_genBuffer Same as RDG_genOut, but generates data into the provided buffer */
1,675
41
jart/cosmopolitan
false
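A usage sketch for the generator API documented above (not part of the sources; buffer size and probabilities are arbitrary example values):

/* Sketch: fill a buffer with ~50%-compressible data, then release it. */
#include "libc/mem/mem.h"  /* malloc, free, size_t */
#include "third_party/lz4cli/datagen.h"

int main(void) {
    size_t const size = 1u << 20;  /* 1 MiB of synthetic input */
    void* const buf = malloc(size);
    if (!buf) return 1;
    /* matchProba 0.50, litProba 0.0 (use default), seed 0 */
    RDG_genBuffer(buf, size, 0.50, 0.0, 0);
    free(buf);
    return 0;
}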
cosmopolitan/third_party/lz4cli/README
LZ4(1) Cosmopolitan User Commands -*-text-*- 𝐍𝐀𝐌𝐄 𝗹𝘇𝟰 - lz4, unlz4, lz4cat - Compress or decompress .lz4 files 𝐒𝐘𝐍𝐎𝐏𝐒𝐈𝐒 𝗹𝘇𝟰 [O̲P̲T̲I̲O̲N̲S̲] [-|INPUT-FILE] O̲U̲T̲P̲U̲T̲-̲F̲I̲L̲E̲ 𝘂𝗻𝗹𝘇𝟰 is equivalent to 𝗹𝘇𝟰 -𝗱 𝗹𝘇𝟰𝗰𝗮𝘁 is equivalent to 𝗹𝘇𝟰 -𝗱𝗰𝗳𝗺 When writing scripts that need to decompress files, it is recom‐ mended to always use the name 𝗹𝘇𝟰 with appropriate arguments (𝗹𝘇𝟰 -𝗱 or 𝗹𝘇𝟰 -𝗱𝗰) instead of the names 𝘂𝗻𝗹𝘇𝟰 and 𝗹𝘇𝟰𝗰𝗮𝘁. 𝐃𝐄𝐒𝐂𝐑𝐈𝐏𝐓𝐈𝐎𝐍 𝗹𝘇𝟰 is an extremely fast lossless compression algorithm, based on 𝗯𝘆𝘁𝗲-𝗮𝗹𝗶𝗴𝗻𝗲𝗱 𝐋𝐙𝟳𝟳 family of compression scheme. 𝗹𝘇𝟰 offers com‐ pression speeds of 400 MB/s per core, linearly scalable with multi-core CPUs. It features an extremely fast decoder, with speed in multiple GB/s per core, typically reaching RAM speed limit on multi-core systems. The native file format is the .𝗹𝘇𝟰 format. 𝐃𝗶𝗳𝗳𝗲𝗿𝗲𝗻𝗰𝗲 𝗯𝗲𝘁𝘄𝗲𝗲𝗻 𝗹𝘇𝟰 𝗮𝗻𝗱 𝗴𝘇𝗶𝗽 𝗹𝘇𝟰 supports a command line syntax similar b̲u̲t̲ n̲o̲t̲ i̲d̲e̲n̲t̲i̲c̲a̲l̲ to 𝗴𝘇𝗶𝗽(𝟭). Differences are : · 𝗹𝘇𝟰 preserves original files · 𝗹𝘇𝟰 compresses a single file by default (see -𝗺 for multiple files) · 𝗹𝘇𝟰 𝗳𝗶𝗹𝗲𝟭 𝗳𝗶𝗹𝗲𝟮 means : compress file1 i̲n̲t̲o̲ file2 · 𝗹𝘇𝟰 𝗳𝗶𝗹𝗲.𝗹𝘇𝟰 will default to decompression (use -𝘇 to force compression) · 𝗹𝘇𝟰 shows real-time notification statistics during compres‐ sion or decompression of a single file (use -𝗾 to silence them) · If no destination name is provided, result is sent to 𝘀𝘁𝗱𝗼𝘂𝘁 e̲x̲c̲e̲p̲t̲ i̲f̲ s̲t̲d̲o̲u̲t̲ i̲s̲ t̲h̲e̲ c̲o̲n̲s̲o̲l̲e̲. · If no destination name is provided, 𝗮𝗻𝗱 if 𝘀𝘁𝗱𝗼𝘂𝘁 is the con‐ sole, 𝗳𝗶𝗹𝗲 is compressed into 𝗳𝗶𝗹𝗲.𝗹𝘇𝟰. · As a consequence of previous rules, note the following exam‐ ple : 𝗹𝘇𝟰 𝗳𝗶𝗹𝗲 | 𝗰𝗼𝗻𝘀𝘂𝗺𝗲𝗿 sends compressed data to 𝗰𝗼𝗻𝘀𝘂𝗺𝗲𝗿 through 𝘀𝘁𝗱𝗼𝘂𝘁, hence it does n̲o̲t̲ create 𝗳𝗶𝗹𝗲.𝗹𝘇𝟰. · Another consequence of those rules is that to run 𝗹𝘇𝟰 under 𝗻𝗼𝗵𝘂𝗽, you should provide a destination file: 𝗻𝗼𝗵𝘂𝗽 𝗹𝘇𝟰 𝗳𝗶𝗹𝗲 𝗳𝗶𝗹𝗲.𝗹𝘇𝟰, because 𝗻𝗼𝗵𝘂𝗽 writes the specified command´s output to a file. Default behaviors can be modified by opt-in commands, detailed below. · 𝗹𝘇𝟰 -𝗺 makes it possible to provide multiple input filenames, which will be compressed into files using suffix .𝗹𝘇𝟰. Progress notifications are also disabled by default (use -𝘃 to enable them). This mode has a behavior which more closely mimics 𝗴𝘇𝗶𝗽 command line, with the main remaining difference being that source files are preserved by default. · Similarly, 𝗹𝘇𝟰 -𝗺 -𝗱 can decompress multiple *.𝗹𝘇𝟰 files. · It´s possible to opt-in to erase source files on successful compression or decompression, using --𝗿𝗺 command. · Consequently, 𝗹𝘇𝟰 -𝗺 --𝗿𝗺 behaves the same as 𝗴𝘇𝗶𝗽. 𝐂𝗼𝗻𝗰𝗮𝘁𝗲𝗻𝗮𝘁𝗶𝗼𝗻 𝗼𝗳 .𝗹𝘇𝟰 𝗳𝗶𝗹𝗲𝘀 It is possible to concatenate .𝗹𝘇𝟰 files as is. 𝗹𝘇𝟰 will decom‐ press such files as if they were a single .𝗹𝘇𝟰 file. For example: lz4 file1 > foo.lz4 lz4 file2 >> foo.lz4 Then 𝗹𝘇𝟰𝗰𝗮𝘁 𝗳𝗼𝗼.𝗹𝘇𝟰 is equivalent to 𝗰𝗮𝘁 𝗳𝗶𝗹𝗲𝟭 𝗳𝗶𝗹𝗲𝟮. 𝐎𝐏𝐓𝐈𝐎𝐍𝐒 𝐒𝗵𝗼𝗿𝘁 𝗰𝗼𝗺𝗺𝗮𝗻𝗱𝘀 𝗰𝗼𝗻𝗰𝗮𝘁𝗲𝗻𝗮𝘁𝗶𝗼𝗻 In some cases, some options can be expressed using short command -𝘅 or long command --𝗹𝗼𝗻𝗴-𝘄𝗼𝗿𝗱. Short commands can be concate‐ nated together. For example, -𝗱 -𝗰 is equivalent to -𝗱𝗰. Long commands cannot be concatenated. They must be clearly separated by a space. 𝐌𝘂𝗹𝘁𝗶𝗽𝗹𝗲 𝗰𝗼𝗺𝗺𝗮𝗻𝗱𝘀 When multiple contradictory commands are issued on a same command line, only the latest one will be applied. 𝐎𝗽𝗲𝗿𝗮𝘁𝗶𝗼𝗻 𝗺𝗼𝗱𝗲 -𝘇 --𝗰𝗼𝗺𝗽𝗿𝗲𝘀𝘀 Compress. 
This is the default operation mode when no oper‐ ation mode option is specified, no other operation mode is implied from the command name (for example, 𝘂𝗻𝗹𝘇𝟰 implies --𝗱𝗲𝗰𝗼𝗺𝗽𝗿𝗲𝘀𝘀), nor from the input file name (for example, a file extension .𝗹𝘇𝟰 implies --𝗱𝗲𝗰𝗼𝗺𝗽𝗿𝗲𝘀𝘀 by default). -𝘇 can also be used to force compression of an already com‐ pressed .𝗹𝘇𝟰 file. -𝗱 --𝗱𝗲𝗰𝗼𝗺𝗽𝗿𝗲𝘀𝘀 --𝘂𝗻𝗰𝗼𝗺𝗽𝗿𝗲𝘀𝘀 Decompress. --𝗱𝗲𝗰𝗼𝗺𝗽𝗿𝗲𝘀𝘀 is also the default operation when the input filename has an .𝗹𝘇𝟰 extension. -𝘁 --𝘁𝗲𝘀𝘁 Test the integrity of compressed .𝗹𝘇𝟰 files. The decom‐ pressed data is discarded. No files are created nor removed. -𝗯# Benchmark mode, using # compression level. 𝐎𝗽𝗲𝗿𝗮𝘁𝗶𝗼𝗻 𝗺𝗼𝗱𝗶𝗳𝗶𝗲𝗿𝘀 -# Compression level, with # being any value from 1 to 12. Higher values trade compression speed for compression ratio. Values above 12 are considered the same as 12. Rec‐ ommended values are 1 for fast compression (default), and 9 for high compression. Speed/compression trade-off will vary depending on data to compress. Decompression speed remains fast at all settings. -𝐃 𝗱𝗶𝗰𝘁𝗶𝗼𝗻𝗮𝗿𝘆𝐍𝗮𝗺𝗲 Compress, decompress or benchmark using dictionary d̲i̲c̲t̲i̲o̲‐̲ n̲a̲r̲y̲N̲a̲m̲e̲. Compression and decompression must use the same dictionary to be compatible. Using a different dictionary during decompression will either abort due to decompres‐ sion error, or generate a checksum error. -𝗳 --[𝗻𝗼-]𝗳𝗼𝗿𝗰𝗲 This option has several effects: If the target file already exists, overwrite it without prompting. When used with --𝗱𝗲𝗰𝗼𝗺𝗽𝗿𝗲𝘀𝘀 and 𝗹𝘇𝟰 cannot recognize the type of the source file, copy the source file as is to standard output. This allows 𝗹𝘇𝟰𝗰𝗮𝘁 --𝗳𝗼𝗿𝗰𝗲 to be used like 𝗰𝗮𝘁 (𝟭) for files that have not been compressed with 𝗹𝘇𝟰. -𝗰 --𝘀𝘁𝗱𝗼𝘂𝘁 --𝘁𝗼-𝘀𝘁𝗱𝗼𝘂𝘁 Force write to standard output, even if it is the console. -𝗺 --𝗺𝘂𝗹𝘁𝗶𝗽𝗹𝗲 Multiple input files. Compressed file names will be appended a .𝗹𝘇𝟰 suffix. This mode also reduces notifica‐ tion level. 𝗹𝘇𝟰 -𝗺 has a behavior equivalent to 𝗴𝘇𝗶𝗽 -𝗸 (it preserves source files by default). -𝗿 operate recursively on directories. This mode also sets -𝗺 (multiple input files). -𝐁# Block size [4-7](default : 7) -𝐁𝟰= 64KB ; -𝐁𝟱= 256KB ; -𝐁𝟲= 1MB ; -𝐁𝟳= 4MB -𝐁𝐃 Block Dependency (improves compression ratio on small blocks) --𝗳𝗮𝘀𝘁[=#] switch to ultra-fast compression levels. If =# is not present, it defaults to 𝟭. The higher the value, the faster the compression speed, at the cost of some compres‐ sion ratio. This setting overwrites compression level if one was set previously. Similarly, if a compression level is set after --𝗳𝗮𝘀𝘁, it overrides it. --[𝗻𝗼-]𝗳𝗿𝗮𝗺𝗲-𝗰𝗿𝗰 Select frame checksum (default:enabled) --[𝗻𝗼-]𝗰𝗼𝗻𝘁𝗲𝗻𝘁-𝘀𝗶𝘇𝗲 Header includes original size (default:not present) Note : this option can only be activated when the original size can be determined, hence for a file. It won´t work with unknown source size, such as stdin or pipe. 
--[𝗻𝗼-]𝘀𝗽𝗮𝗿𝘀𝗲 Sparse mode support (default:enabled on file, disabled on stdout) -𝗹 Use Legacy format (typically for Linux Kernel compression) Note : -𝗹 is not compatible with -𝗺 (--𝗺𝘂𝗹𝘁𝗶𝗽𝗹𝗲) nor -𝗿 𝐎𝘁𝗵𝗲𝗿 𝗼𝗽𝘁𝗶𝗼𝗻𝘀 -𝘃 --𝘃𝗲𝗿𝗯𝗼𝘀𝗲 Verbose mode -𝗾 --𝗾𝘂𝗶𝗲𝘁 Suppress warnings and real-time statistics; specify twice to suppress errors too -𝗵 -𝐇 --𝗵𝗲𝗹𝗽 Display help/long help and exit -𝐕 --𝘃𝗲𝗿𝘀𝗶𝗼𝗻 Display Version number and exit -𝗸 --𝗸𝗲𝗲𝗽 Preserve source files (default behavior) --𝗿𝗺 Delete source files on successful compression or decom‐ pression -- Treat all subsequent arguments as files 𝐁𝗲𝗻𝗰𝗵𝗺𝗮𝗿𝗸 𝗺𝗼𝗱𝗲 -𝗯# Benchmark file(s), using # compression level -𝗲# Benchmark multiple compression levels, from b# to e# (included) -𝗶# Minimum evaluation time in seconds [1-9] (default : 3) 𝐁𝐔𝐆𝐒 Report bugs at: https://github.com/lz4/lz4/issues 𝐀𝐔𝐓𝐇𝐎𝐑 Yann Collet lz4 1.8.3 September 2018 LZ4(1)
11,376
233
jart/cosmopolitan
false
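The man page's "Concatenation of .lz4 files" rule can also be exercised through the I/O wrapper declared in lz4io.h below; the sketch assumes, as the man page documents for the CLI, that the decoder consumes concatenated frames until end of file. File names are hypothetical.

/* Sketch: decode a file produced by concatenating two .lz4 frames
 * (cf. the concatenation section above). Names are placeholders. */
#include "libc/mem/mem.h"  /* size_t for lz4io.h prototypes */
#include "third_party/lz4cli/lz4io.h"

int main(void) {
    LZ4IO_setOverwrite(1);
    /* foo.lz4 == frame(file1) ++ frame(file2); output == file1 ++ file2 */
    return LZ4IO_decompressFilename("foo.lz4", "foo.out");
}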
cosmopolitan/third_party/lz4cli/datagen.c
/* clang-format off */ /* datagen.c - compressible data generator test tool Copyright (C) Yann Collet 2012-2016 GPL v2 License This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. You can contact the author at : - LZ4 source repository : https://github.com/lz4/lz4 - Public forum : https://groups.google.com/forum/#!forum/lz4c */ /************************************** * Includes **************************************/ #include "third_party/lz4cli/platform.h" /* Compiler options, SET_BINARY_MODE */ #include "third_party/lz4cli/util.h" /* U32 */ #include "libc/mem/mem.h" /* malloc */ #include "libc/stdio/stdio.h" /* FILE, fwrite */ #include "libc/str/str.h" /* memcpy */ #include "libc/assert.h" #include "libc/runtime/runtime.h" /************************************** * Constants **************************************/ #define KB *(1 <<10) #define PRIME1 2654435761U #define PRIME2 2246822519U /************************************** * Local types **************************************/ #define LTLOG 13 #define LTSIZE (1<<LTLOG) #define LTMASK (LTSIZE-1) typedef BYTE litDistribTable[LTSIZE]; /********************************************************* * Local Functions *********************************************************/ #ifndef MIN #define MIN(a,b) ( (a) < (b) ? (a) :(b) ) #endif #define RDG_rotl32(x,r) ((x << r) | (x >> (32 - r))) static unsigned int RDG_rand(U32* src) { U32 rand32 = *src; rand32 *= PRIME1; rand32 ^= PRIME2; rand32 = RDG_rotl32(rand32, 13); *src = rand32; return rand32; } static void RDG_fillLiteralDistrib(litDistribTable lt, double ld) { BYTE const firstChar = ld <= 0.0 ? 0 : '('; BYTE const lastChar = ld <= 0.0 ? 255 : '}'; BYTE character = ld <= 0.0 ? 0 : '0'; U32 u = 0; while (u<LTSIZE) { U32 const weight = (U32)((double)(LTSIZE - u) * ld) + 1; U32 const end = MIN(u+weight, LTSIZE); while (u < end) { assert(u<LTSIZE); /* try to ease static analyzer. u < end <= LTSIZE */ lt[u++] = character; } character++; if (character > lastChar) character = firstChar; } } static BYTE RDG_genChar(U32* seed, const litDistribTable lt) { U32 id = RDG_rand(seed) & LTMASK; return (lt[id]); } #define RDG_DICTSIZE (32 KB) #define RDG_RAND15BITS ((RDG_rand(seed) >> 3) & 32767) #define RDG_RANDLENGTH ( ((RDG_rand(seed) >> 7) & 7) ? 
(RDG_rand(seed) & 15) : (RDG_rand(seed) & 511) + 15) void RDG_genBlock(void* buffer, size_t buffSize, size_t prefixSize, double matchProba, litDistribTable lt, unsigned* seedPtr) { BYTE* buffPtr = (BYTE*)buffer; const U32 matchProba32 = (U32)(32768 * matchProba); size_t pos = prefixSize; U32* seed = seedPtr; /* special case */ while (matchProba >= 1.0) { size_t size0 = RDG_rand(seed) & 3; size0 = (size_t)1 << (16 + size0 * 2); size0 += RDG_rand(seed) & (size0-1); /* because size0 is power of 2*/ if (buffSize < pos + size0) { memset(buffPtr+pos, 0, buffSize-pos); return; } memset(buffPtr+pos, 0, size0); pos += size0; buffPtr[pos-1] = RDG_genChar(seed, lt); } /* init */ if (pos==0) { buffPtr[0] = RDG_genChar(seed, lt); pos=1; } /* Generate compressible data */ while (pos < buffSize) { /* Select : Literal (char) or Match (within 32K) */ if (RDG_RAND15BITS < matchProba32) { /* Copy (within 32K) */ size_t match; size_t d; int length = RDG_RANDLENGTH + 4; U32 offset = RDG_RAND15BITS + 1; if (offset > pos) offset = (U32)pos; match = pos - offset; d = pos + length; if (d > buffSize) d = buffSize; while (pos < d) buffPtr[pos++] = buffPtr[match++]; } else { /* Literal (noise) */ size_t d; size_t length = RDG_RANDLENGTH; d = pos + length; if (d > buffSize) d = buffSize; while (pos < d) buffPtr[pos++] = RDG_genChar(seed, lt); } } } void RDG_genBuffer(void* buffer, size_t size, double matchProba, double litProba, unsigned seed) { litDistribTable lt; if (litProba==0.0) litProba = matchProba / 4.5; RDG_fillLiteralDistrib(lt, litProba); RDG_genBlock(buffer, size, 0, matchProba, lt, &seed); } #define RDG_BLOCKSIZE (128 KB) void RDG_genOut(unsigned long long size, double matchProba, double litProba, unsigned seed) { BYTE buff[RDG_DICTSIZE + RDG_BLOCKSIZE]; U64 total = 0; size_t genBlockSize = RDG_BLOCKSIZE; litDistribTable lt; /* init */ if (litProba==0.0) litProba = matchProba / 4.5; RDG_fillLiteralDistrib(lt, litProba); SET_BINARY_MODE(stdout); /* Generate dict */ RDG_genBlock(buff, RDG_DICTSIZE, 0, matchProba, lt, &seed); /* Generate compressible data */ while (total < size) { RDG_genBlock(buff, RDG_DICTSIZE+RDG_BLOCKSIZE, RDG_DICTSIZE, matchProba, lt, &seed); if (size-total < RDG_BLOCKSIZE) genBlockSize = (size_t)(size-total); total += genBlockSize; fwrite(buff, 1, genBlockSize, stdout); /* should check potential write error */ /* update dict */ memcpy(buff, buff + RDG_BLOCKSIZE, RDG_DICTSIZE); } }
6,080
194
jart/cosmopolitan
false
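To make the determinism contract concrete (the same matchProba, litProba and seed always generate the same content, per datagen.h), here is a small self-check; it is a sketch, not a test from the repository:

/* Sketch: verify RDG_genBuffer() is deterministic for fixed parameters. */
#include "libc/assert.h"
#include "libc/mem/mem.h"
#include "libc/str/str.h"
#include "third_party/lz4cli/datagen.h"

int main(void) {
    enum { N = 64 * 1024 };
    char* a = malloc(N);
    char* b = malloc(N);
    if (!a || !b) return 1;
    RDG_genBuffer(a, N, 0.70, 0.0, 42);
    RDG_genBuffer(b, N, 0.70, 0.0, 42);
    assert(!memcmp(a, b, N));  /* identical inputs, identical bytes */
    free(a);
    free(b);
    return 0;
}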
cosmopolitan/third_party/lz4cli/lz4io.h
/* clang-format off */ /* LZ4io.h - LZ4 File/Stream Interface Copyright (C) Yann Collet 2011-2016 GPL v2 License This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. You can contact the author at : - LZ4 source repository : https://github.com/lz4/lz4 - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c */ /* Note : this is a stand-alone program. It is not part of the LZ4 compression library; it is user code of the LZ4 library. - The license of LZ4 library is BSD. - The license of xxHash library is BSD. - The license of this source file is GPLv2. */ #ifndef LZ4IO_H_237902873 #define LZ4IO_H_237902873 /*--- Dependency ---*/ /* ************************************************** */ /* Special input/output values */ /* ************************************************** */ #define NULL_OUTPUT "null" static const char stdinmark[] = "stdin"; static const char stdoutmark[] = "stdout"; #ifdef _WIN32 static const char nulmark[] = "nul"; #else static const char nulmark[] = "/dev/null"; #endif /* ************************************************** */ /* ****************** Functions ********************* */ /* ************************************************** */ int LZ4IO_compressFilename (const char* input_filename, const char* output_filename, int compressionlevel); int LZ4IO_decompressFilename(const char* input_filename, const char* output_filename); int LZ4IO_compressMultipleFilenames(const char** inFileNamesTable, int ifntSize, const char* suffix, int compressionlevel); int LZ4IO_decompressMultipleFilenames(const char** inFileNamesTable, int ifntSize, const char* suffix); /* ************************************************** */ /* ****************** Parameters ******************** */ /* ************************************************** */ int LZ4IO_setDictionaryFilename(const char* dictionaryFilename); /* Default setting : overwrite = 1; return : overwrite mode (0/1) */ int LZ4IO_setOverwrite(int yes); /* Default setting : testMode = 0; return : testMode (0/1) */ int LZ4IO_setTestMode(int yes); /* blockSizeID : valid values : 4-5-6-7 return : 0 if error, blockSize if OK */ size_t LZ4IO_setBlockSizeID(unsigned blockSizeID); /* Default setting : independent blocks */ typedef enum { LZ4IO_blockLinked=0, LZ4IO_blockIndependent} LZ4IO_blockMode_t; int LZ4IO_setBlockMode(LZ4IO_blockMode_t blockMode); /* Default setting : no block checksum */ int LZ4IO_setBlockChecksumMode(int xxhash); /* Default setting : stream checksum enabled */ int LZ4IO_setStreamChecksumMode(int xxhash); /* Default setting : 0 (no notification) */ int LZ4IO_setNotificationLevel(int level); /* Default setting : 0 (disabled) */ int LZ4IO_setSparseFile(int enable); /* Default setting : 0 == no content size present in frame header */ int LZ4IO_setContentSize(int enable); /* Default setting : 0 == src file preserved */ void LZ4IO_setRemoveSrcFile(unsigned flag); /* Default setting : 0 == favor compression ratio *
Note : 1 only works for high compression levels (10+) */ void LZ4IO_favorDecSpeed(int favor); #endif /* LZ4IO_H_237902873 */
3,790
109
jart/cosmopolitan
false
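A compression sketch against the interface above (file names are placeholders; only setters and entry points declared in lz4io.h are used):

/* Sketch: compress one file with CLI-equivalent settings (-9 -f -B7). */
#include "libc/mem/mem.h"  /* size_t for lz4io.h prototypes */
#include "third_party/lz4cli/lz4io.h"

int main(void) {
    LZ4IO_setOverwrite(1);           /* like -f : overwrite silently */
    LZ4IO_setBlockSizeID(7);         /* 4 MB blocks (the default) */
    LZ4IO_setStreamChecksumMode(1);  /* keep the frame checksum */
    /* "input.bin" and "input.bin.lz4" are hypothetical names */
    return LZ4IO_compressFilename("input.bin", "input.bin.lz4", 9);
}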
cosmopolitan/third_party/lz4cli/lz4cli.c
/* clang-format off */ /* LZ4cli - LZ4 Command Line Interface Copyright (C) Yann Collet 2011-2016 GPL v2 License This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. You can contact the author at : - LZ4 source repository : https://github.com/lz4/lz4 - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c */ /* Note : this is stand-alone program. It is not part of LZ4 compression library, it is a user program of the LZ4 library. The license of LZ4 library is BSD. The license of xxHash library is BSD. The license of this compression CLI program is GPLv2. */ asm(".include \"third_party/lz4cli/COPYING\""); /**************************** * Includes *****************************/ #include "third_party/lz4cli/platform.h" /* Compiler options, IS_CONSOLE */ #include "third_party/lz4cli/util.h" /* UTIL_HAS_CREATEFILELIST, UTIL_createFileList */ #include "libc/stdio/stdio.h" /* fprintf, getchar */ #include "libc/mem/mem.h" /* exit, calloc, free */ #include "libc/str/str.h" /* strcmp, strlen */ #include "third_party/lz4cli/bench.h" /* BMK_benchFile, BMK_SetNbIterations, BMK_SetBlocksize, BMK_SetPause */ #include "third_party/lz4cli/lz4io.h" /* LZ4IO_compressFilename, LZ4IO_decompressFilename, LZ4IO_compressMultipleFilenames */ #include "third_party/lz4cli/lz4hc.h" /* LZ4HC_CLEVEL_MAX */ #include "libc/runtime/runtime.h" #include "libc/log/log.h" #include "libc/stdio/stdio.h" #include "third_party/lz4cli/lz4.h" /* LZ4_VERSION_STRING */ /***************************** * Constants ******************************/ #define COMPRESSOR_NAME "LZ4 command line interface" #define AUTHOR "Yann Collet" #define WELCOME_MESSAGE "*** %s %i-bits v%s, by %s ***\n", COMPRESSOR_NAME, (int)(sizeof(void*)*8), LZ4_versionString(), AUTHOR #define LZ4_EXTENSION ".lz4" #define LZ4CAT "lz4cat" #define UNLZ4 "unlz4" #define LZ4_LEGACY "lz4c" static int g_lz4c_legacy_commands = 0; #define KB *(1U<<10) #define MB *(1U<<20) #define GB *(1U<<30) #define LZ4_BLOCKSIZEID_DEFAULT 7 /*-************************************ * Macros ***************************************/ #define DISPLAY(...) fprintf(stderr, __VA_ARGS__) #define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); } static unsigned displayLevel = 2; /* 0 : no display ; 1: errors only ; 2 : downgradable normal ; 3 : non-downgradable normal; 4 : + information */ /*-************************************ * Exceptions ***************************************/ #ifndef DEBUG #define DEBUG 0 #endif #define DEBUGOUTPUT(...) if (DEBUG) DISPLAY(__VA_ARGS__); #define EXM_THROW(error, ...) 
\ { \ DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \ DISPLAYLEVEL(1, "Error %i : ", error); \ DISPLAYLEVEL(1, __VA_ARGS__); \ DISPLAYLEVEL(1, "\n"); \ exit(error); \ } /*-************************************ * Version modifiers ***************************************/ #define DEFAULT_COMPRESSOR LZ4IO_compressFilename #define DEFAULT_DECOMPRESSOR LZ4IO_decompressFilename int LZ4IO_compressFilename_Legacy(const char* input_filename, const char* output_filename, int compressionlevel); /* hidden function */ /*-*************************** * Functions *****************************/ static int usage(const char* exeName) { DISPLAY( "Usage : \n"); DISPLAY( " %s [arg] [input] [output] \n", exeName); DISPLAY( "\n"); DISPLAY( "input : a filename \n"); DISPLAY( " with no FILE, or when FILE is - or %s, read standard input\n", stdinmark); DISPLAY( "Arguments : \n"); DISPLAY( " -1 : Fast compression (default) \n"); DISPLAY( " -9 : High compression \n"); DISPLAY( " -d : decompression (default for %s extension)\n", LZ4_EXTENSION); DISPLAY( " -z : force compression \n"); DISPLAY( " -D FILE: use FILE as dictionary \n"); DISPLAY( " -f : overwrite output without prompting \n"); DISPLAY( " -k : preserve source files(s) (default) \n"); DISPLAY( "--rm : remove source file(s) after successful de/compression \n"); DISPLAY( " -h/-H : display help/long help and exit \n"); return 0; } static int usage_advanced(const char* exeName) { DISPLAY(WELCOME_MESSAGE); usage(exeName); DISPLAY( "\n"); DISPLAY( "Advanced arguments :\n"); DISPLAY( " -V : display Version number and exit \n"); DISPLAY( " -v : verbose mode \n"); DISPLAY( " -q : suppress warnings; specify twice to suppress errors too\n"); DISPLAY( " -c : force write to standard output, even if it is the console\n"); DISPLAY( " -t : test compressed file integrity\n"); DISPLAY( " -m : multiple input files (implies automatic output filenames)\n"); #ifdef UTIL_HAS_CREATEFILELIST DISPLAY( " -r : operate recursively on directories (sets also -m) \n"); #endif DISPLAY( " -l : compress using Legacy format (Linux kernel compression)\n"); DISPLAY( " -B# : Block size [4-7] (default : 7) \n"); DISPLAY( " -BD : Block dependency (improve compression ratio) \n"); DISPLAY( " -BX : enable block checksum (default:disabled) \n"); DISPLAY( "--no-frame-crc : disable stream checksum (default:enabled) \n"); DISPLAY( "--content-size : compressed frame includes original size (default:not present)\n"); DISPLAY( "--[no-]sparse : sparse mode (default:enabled on file, disabled on stdout)\n"); DISPLAY( "--favor-decSpeed: compressed files decompress faster, but are less compressed \n"); DISPLAY( "--fast[=#]: switch to ultra fast compression level (default: %u)\n", 1); DISPLAY( "Benchmark arguments : \n"); DISPLAY( " -b# : benchmark file(s), using # compression level (default : 1) \n"); DISPLAY( " -e# : test all compression levels from -bX to # (default : 1)\n"); DISPLAY( " -i# : minimum evaluation time in seconds (default : 3s) \n"); DISPLAY( " -B# : cut file into independent blocks of size # bytes [32+] \n"); DISPLAY( " or predefined block size [4-7] (default: 7) \n"); if (g_lz4c_legacy_commands) { DISPLAY( "Legacy arguments : \n"); DISPLAY( " -c0 : fast compression \n"); DISPLAY( " -c1 : high compression \n"); DISPLAY( " -c2,-hc: very high compression \n"); DISPLAY( " -y : overwrite output without prompting \n"); } return 0; } static int usage_longhelp(const char* exeName) { usage_advanced(exeName); DISPLAY( "\n"); DISPLAY( "****************************\n"); DISPLAY( "***** 
Advanced comment *****\n"); DISPLAY( "****************************\n"); DISPLAY( "\n"); DISPLAY( "Which values can [output] have ? \n"); DISPLAY( "---------------------------------\n"); DISPLAY( "[output] : a filename \n"); DISPLAY( " '%s', or '-' for standard output (pipe mode)\n", stdoutmark); DISPLAY( " '%s' to discard output (test mode) \n", NULL_OUTPUT); DISPLAY( "[output] can be left empty. In this case, it receives the following value :\n"); DISPLAY( " - if stdout is not the console, then [output] = stdout \n"); DISPLAY( " - if stdout is console : \n"); DISPLAY( " + for compression, output to filename%s \n", LZ4_EXTENSION); DISPLAY( " + for decompression, output to filename without '%s'\n", LZ4_EXTENSION); DISPLAY( " > if input filename has no '%s' extension : error \n", LZ4_EXTENSION); DISPLAY( "\n"); DISPLAY( "Compression levels : \n"); DISPLAY( "---------------------\n"); DISPLAY( "-0 ... -2 => Fast compression, all identicals\n"); DISPLAY( "-3 ... -%d => High compression; higher number == more compression but slower\n", LZ4HC_CLEVEL_MAX); DISPLAY( "\n"); DISPLAY( "stdin, stdout and the console : \n"); DISPLAY( "--------------------------------\n"); DISPLAY( "To protect the console from binary flooding (bad argument mistake)\n"); DISPLAY( "%s will refuse to read from console, or write to console \n", exeName); DISPLAY( "except if '-c' command is specified, to force output to console \n"); DISPLAY( "\n"); DISPLAY( "Simple example :\n"); DISPLAY( "----------------\n"); DISPLAY( "1 : compress 'filename' fast, using default output name 'filename.lz4'\n"); DISPLAY( " %s filename\n", exeName); DISPLAY( "\n"); DISPLAY( "Short arguments can be aggregated. For example :\n"); DISPLAY( "----------------------------------\n"); DISPLAY( "2 : compress 'filename' in high compression mode, overwrite output if exists\n"); DISPLAY( " %s -9 -f filename \n", exeName); DISPLAY( " is equivalent to :\n"); DISPLAY( " %s -9f filename \n", exeName); DISPLAY( "\n"); DISPLAY( "%s can be used in 'pure pipe mode'. For example :\n", exeName); DISPLAY( "-------------------------------------\n"); DISPLAY( "3 : compress data stream from 'generator', send result to 'consumer'\n"); DISPLAY( " generator | %s | consumer \n", exeName); if (g_lz4c_legacy_commands) { DISPLAY( "\n"); DISPLAY( "***** Warning ***** \n"); DISPLAY( "Legacy arguments take precedence. Therefore : \n"); DISPLAY( "--------------------------------- \n"); DISPLAY( " %s -hc filename \n", exeName); DISPLAY( "means 'compress filename in high compression mode' \n"); DISPLAY( "It is not equivalent to : \n"); DISPLAY( " %s -h -c filename \n", exeName); DISPLAY( "which displays help text and exits \n"); } return 0; } static int badusage(const char* exeName) { DISPLAYLEVEL(1, "Incorrect parameters\n"); if (displayLevel >= 1) usage(exeName); exit(1); } static void waitEnter(void) { DISPLAY("Press enter to continue...\n"); (void)getchar(); } static const char* lastNameFromPath(const char* path) { const char* name = path; if (strrchr(name, '/')) name = strrchr(name, '/') + 1; if (strrchr(name, '\\')) name = strrchr(name, '\\') + 1; /* windows */ return name; } /*! exeNameMatch() : @return : a non-zero value if exeName matches test, excluding the extension */ static int exeNameMatch(const char* exeName, const char* test) { return !strncmp(exeName, test, strlen(test)) && (exeName[strlen(test)] == '\0' || exeName[strlen(test)] == '.'); } /*! 
readU32FromChar() : @return : unsigned integer value read from input in `char` format allows and interprets K, KB, KiB, M, MB and MiB suffix. Will also modify `*stringPtr`, advancing it to position where it stopped reading. Note : function result can overflow if digit string > MAX_UINT */ static unsigned readU32FromChar(const char** stringPtr) { unsigned result = 0; while ((**stringPtr >='0') && (**stringPtr <='9')) { result *= 10; result += **stringPtr - '0'; (*stringPtr)++ ; } if ((**stringPtr=='K') || (**stringPtr=='M')) { result <<= 10; if (**stringPtr=='M') result <<= 10; (*stringPtr)++ ; if (**stringPtr=='i') (*stringPtr)++; if (**stringPtr=='B') (*stringPtr)++; } return result; } /** longCommandWArg() : * check if *stringPtr is the same as longCommand. * If yes, @return 1 and advances *stringPtr to the position which immediately follows longCommand. * @return 0 and doesn't modify *stringPtr otherwise. */ static unsigned longCommandWArg(const char** stringPtr, const char* longCommand) { size_t const comSize = strlen(longCommand); int const result = !strncmp(*stringPtr, longCommand, comSize); if (result) *stringPtr += comSize; return result; } typedef enum { om_auto, om_compress, om_decompress, om_test, om_bench } operationMode_e; int main(int argc, const char** argv) { int i, cLevel=1, cLevelLast=-10000, legacy_format=0, forceStdout=0, main_pause=0, multiple_inputs=0, all_arguments_are_files=0, operationResult=0; operationMode_e mode = om_auto; const char* input_filename = NULL; const char* output_filename= NULL; const char* dictionary_filename = NULL; char* dynNameSpace = NULL; const char** inFileNames = (const char**) calloc(argc, sizeof(char*)); unsigned ifnIdx=0; const char nullOutput[] = NULL_OUTPUT; const char extension[] = LZ4_EXTENSION; size_t blockSize = LZ4IO_setBlockSizeID(LZ4_BLOCKSIZEID_DEFAULT); const char* const exeName = lastNameFromPath(argv[0]); #ifdef UTIL_HAS_CREATEFILELIST const char** extendedFileList = NULL; char* fileNamesBuf = NULL; unsigned fileNamesNb, recursive=0; #endif /* Init */ if (inFileNames==NULL) { DISPLAY("Allocation error : not enough memory \n"); return 1; } inFileNames[0] = stdinmark; LZ4IO_setOverwrite(0); /* predefined behaviors, based on binary/link name */ if (exeNameMatch(exeName, LZ4CAT)) { mode = om_decompress; LZ4IO_setOverwrite(1); LZ4IO_setRemoveSrcFile(0); forceStdout=1; output_filename=stdoutmark; displayLevel=1; multiple_inputs=1; } if (exeNameMatch(exeName, UNLZ4)) { mode = om_decompress; } if (exeNameMatch(exeName, LZ4_LEGACY)) { g_lz4c_legacy_commands=1; } /* command switches */ for(i=1; i<argc; i++) { const char* argument = argv[i]; if(!argument) continue; /* Protection if argument empty */ /* Short commands (note : aggregated short commands are allowed) */ if (!all_arguments_are_files && argument[0]=='-') { /* '-' means stdin/stdout */ if (argument[1]==0) { if (!input_filename) input_filename=stdinmark; else output_filename=stdoutmark; continue; } /* long commands (--long-word) */ if (argument[1]=='-') { if (!strcmp(argument, "--")) { all_arguments_are_files = 1; continue; } if (!strcmp(argument, "--compress")) { mode = om_compress; continue; } if ((!strcmp(argument, "--decompress")) || (!strcmp(argument, "--uncompress"))) { mode = om_decompress; continue; } if (!strcmp(argument, "--multiple")) { multiple_inputs = 1; continue; } if (!strcmp(argument, "--test")) { mode = om_test; continue; } if (!strcmp(argument, "--force")) { LZ4IO_setOverwrite(1); continue; } if (!strcmp(argument, "--no-force")) { LZ4IO_setOverwrite(0); 
continue; } if ((!strcmp(argument, "--stdout")) || (!strcmp(argument, "--to-stdout"))) { forceStdout=1; output_filename=stdoutmark; continue; } if (!strcmp(argument, "--frame-crc")) { LZ4IO_setStreamChecksumMode(1); continue; } if (!strcmp(argument, "--no-frame-crc")) { LZ4IO_setStreamChecksumMode(0); continue; } if (!strcmp(argument, "--content-size")) { LZ4IO_setContentSize(1); continue; } if (!strcmp(argument, "--no-content-size")) { LZ4IO_setContentSize(0); continue; } if (!strcmp(argument, "--sparse")) { LZ4IO_setSparseFile(2); continue; } if (!strcmp(argument, "--no-sparse")) { LZ4IO_setSparseFile(0); continue; } if (!strcmp(argument, "--favor-decSpeed")) { LZ4IO_favorDecSpeed(1); continue; } if (!strcmp(argument, "--verbose")) { displayLevel++; continue; } if (!strcmp(argument, "--quiet")) { if (displayLevel) displayLevel--; continue; } if (!strcmp(argument, "--version")) { DISPLAY(WELCOME_MESSAGE); return 0; } if (!strcmp(argument, "--help")) { usage_advanced(exeName); goto _cleanup; } if (!strcmp(argument, "--keep")) { LZ4IO_setRemoveSrcFile(0); continue; } /* keep source file (default) */ if (!strcmp(argument, "--rm")) { LZ4IO_setRemoveSrcFile(1); continue; } if (longCommandWArg(&argument, "--fast")) { /* Parse optional acceleration factor */ if (*argument == '=') { U32 fastLevel; ++argument; fastLevel = readU32FromChar(&argument); if (fastLevel) { cLevel = -(int)fastLevel; } else { badusage(exeName); } } else if (*argument != 0) { /* Invalid character following --fast */ badusage(exeName); } else { cLevel = -1; /* default for --fast */ } continue; } } while (argument[1]!=0) { argument ++; if (g_lz4c_legacy_commands) { /* Legacy commands (-c0, -c1, -hc, -y) */ if (!strcmp(argument, "c0")) { cLevel=0; argument++; continue; } /* -c0 (fast compression) */ if (!strcmp(argument, "c1")) { cLevel=9; argument++; continue; } /* -c1 (high compression) */ if (!strcmp(argument, "c2")) { cLevel=12; argument++; continue; } /* -c2 (very high compression) */ if (!strcmp(argument, "hc")) { cLevel=12; argument++; continue; } /* -hc (very high compression) */ if (!strcmp(argument, "y")) { LZ4IO_setOverwrite(1); continue; } /* -y (answer 'yes' to overwrite permission) */ } if ((*argument>='0') && (*argument<='9')) { cLevel = readU32FromChar(&argument); argument--; continue; } switch(argument[0]) { /* Display help */ case 'V': DISPLAY(WELCOME_MESSAGE); goto _cleanup; /* Version */ case 'h': usage_advanced(exeName); goto _cleanup; case 'H': usage_longhelp(exeName); goto _cleanup; case 'e': argument++; cLevelLast = readU32FromChar(&argument); argument--; break; /* Compression (default) */ case 'z': mode = om_compress; break; case 'D': if (argument[1] == '\0') { /* path is next arg */ if (i + 1 == argc) { /* there is no next arg */ badusage(exeName); } dictionary_filename = argv[++i]; } else { /* path follows immediately */ dictionary_filename = argument + 1; } /* skip to end of argument so that we jump to parsing next argument */ argument += strlen(argument) - 1; break; /* Use Legacy format (ex : Linux kernel compression) */ case 'l': legacy_format = 1; blockSize = 8 MB; break; /* Decoding */ case 'd': mode = om_decompress; break; /* Force stdout, even if stdout==console */ case 'c': forceStdout=1; output_filename=stdoutmark; break; /* Test integrity */ case 't': mode = om_test; break; /* Overwrite */ case 'f': LZ4IO_setOverwrite(1); break; /* Verbose mode */ case 'v': displayLevel++; break; /* Quiet mode */ case 'q': if (displayLevel) displayLevel--; break; /* keep source file (default anyway, so 
useless) (for xz/lzma compatibility) */ case 'k': LZ4IO_setRemoveSrcFile(0); break; /* Modify Block Properties */ case 'B': while (argument[1]!=0) { int exitBlockProperties=0; switch(argument[1]) { case 'D': LZ4IO_setBlockMode(LZ4IO_blockLinked); argument++; break; case 'X': LZ4IO_setBlockChecksumMode(1); argument ++; break; /* disabled by default */ default : if (argument[1] < '0' || argument[1] > '9') { exitBlockProperties=1; break; } else { unsigned B; argument++; B = readU32FromChar(&argument); argument--; if (B < 4) badusage(exeName); if (B <= 7) { blockSize = LZ4IO_setBlockSizeID(B); BMK_setBlockSize(blockSize); DISPLAYLEVEL(2, "using blocks of size %u KB \n", (U32)(blockSize>>10)); } else { if (B < 32) badusage(exeName); BMK_setBlockSize(B); if (B >= 1024) { DISPLAYLEVEL(2, "bench: using blocks of size %u KB \n", (U32)(B>>10)); } else { DISPLAYLEVEL(2, "bench: using blocks of size %u bytes \n", (U32)(B)); } } break; } } if (exitBlockProperties) break; } break; /* Benchmark */ case 'b': mode = om_bench; multiple_inputs=1; break; /* hidden command : benchmark files, but do not fuse result */ case 'S': BMK_setBenchSeparately(1); break; #ifdef UTIL_HAS_CREATEFILELIST /* recursive */ case 'r': recursive=1; #endif /* fall-through */ /* Treat non-option args as input files. See https://code.google.com/p/lz4/issues/detail?id=151 */ case 'm': multiple_inputs=1; break; /* Modify Nb Seconds (benchmark only) */ case 'i': { unsigned iters; argument++; iters = readU32FromChar(&argument); argument--; BMK_setNotificationLevel(displayLevel); BMK_setNbSeconds(iters); /* notification if displayLevel >= 3 */ } break; /* Pause at the end (hidden option) */ case 'p': main_pause=1; break; /* Unrecognised command */ default : badusage(exeName); } } continue; } /* Store in *inFileNames[] if -m is used. */ if (multiple_inputs) { inFileNames[ifnIdx++]=argument; continue; } /* Store first non-option arg in input_filename to preserve original cli logic. */ if (!input_filename) { input_filename=argument; continue; } /* Second non-option arg in output_filename to preserve original cli logic. */ if (!output_filename) { output_filename=argument; if (!strcmp (output_filename, nullOutput)) output_filename = nulmark; continue; } /* 3rd non-option arg should not exist */ DISPLAYLEVEL(1, "Warning : %s won't be used ! Do you want multiple input files (-m) ? 
\n", argument); } DISPLAYLEVEL(3, WELCOME_MESSAGE); #ifdef _POSIX_C_SOURCE DISPLAYLEVEL(4, "_POSIX_C_SOURCE defined: %ldL\n", (long) _POSIX_C_SOURCE); #endif #ifdef _POSIX_VERSION DISPLAYLEVEL(4, "_POSIX_VERSION defined: %ldL\n", (long) _POSIX_VERSION); #endif #ifdef PLATFORM_POSIX_VERSION DISPLAYLEVEL(4, "PLATFORM_POSIX_VERSION defined: %ldL\n", (long) PLATFORM_POSIX_VERSION); #endif #ifdef _FILE_OFFSET_BITS DISPLAYLEVEL(4, "_FILE_OFFSET_BITS defined: %ldL\n", (long) _FILE_OFFSET_BITS); #endif if ((mode == om_compress) || (mode == om_bench)) DISPLAYLEVEL(4, "Blocks size : %u KB\n", (U32)(blockSize>>10)); if (multiple_inputs) { input_filename = inFileNames[0]; #ifdef UTIL_HAS_CREATEFILELIST if (recursive) { /* at this stage, filenameTable is a list of paths, which can contain both files and directories */ extendedFileList = UTIL_createFileList(inFileNames, ifnIdx, &fileNamesBuf, &fileNamesNb); if (extendedFileList) { unsigned u; for (u=0; u<fileNamesNb; u++) DISPLAYLEVEL(4, "%u %s\n", u, extendedFileList[u]); free((void*)inFileNames); inFileNames = extendedFileList; ifnIdx = fileNamesNb; } } #endif } /* benchmark and test modes */ if (mode == om_bench) { BMK_setNotificationLevel(displayLevel); operationResult = BMK_benchFiles(inFileNames, ifnIdx, cLevel, cLevelLast); goto _cleanup; } if (mode == om_test) { LZ4IO_setTestMode(1); output_filename = nulmark; mode = om_decompress; /* defer to decompress */ } if (dictionary_filename) { if (!strcmp(dictionary_filename, stdinmark) && IS_CONSOLE(stdin)) { DISPLAYLEVEL(1, "refusing to read from a console\n"); exit(1); } LZ4IO_setDictionaryFilename(dictionary_filename); } /* compress or decompress */ if (!input_filename) input_filename = stdinmark; /* Check if input is defined as console; trigger an error in this case */ if (!strcmp(input_filename, stdinmark) && IS_CONSOLE(stdin) ) { DISPLAYLEVEL(1, "refusing to read from a console\n"); exit(1); } /* if input==stdin and no output defined, stdout becomes default output */ if (!strcmp(input_filename, stdinmark) && !output_filename) output_filename = stdoutmark; /* No output filename ==> try to select one automatically (when possible) */ while ((!output_filename) && (multiple_inputs==0)) { if (!IS_CONSOLE(stdout)) { output_filename=stdoutmark; break; } /* Default to stdout whenever possible (i.e. not a console) */ if (mode == om_auto) { /* auto-determine compression or decompression, based on file extension */ size_t const inSize = strlen(input_filename); size_t const extSize = strlen(LZ4_EXTENSION); size_t const extStart= (inSize > extSize) ? 
inSize-extSize : 0; if (!strcmp(input_filename+extStart, LZ4_EXTENSION)) mode = om_decompress; else mode = om_compress; } if (mode == om_compress) { /* compression to file */ size_t const l = strlen(input_filename); dynNameSpace = (char*)calloc(1,l+5); if (dynNameSpace==NULL) { perror(exeName); exit(1); } strcpy(dynNameSpace, input_filename); strcat(dynNameSpace, LZ4_EXTENSION); output_filename = dynNameSpace; DISPLAYLEVEL(2, "Compressed filename will be : %s \n", output_filename); break; } if (mode == om_decompress) {/* decompression to file (automatic name will work only if input filename has correct format extension) */ size_t outl; size_t const inl = strlen(input_filename); dynNameSpace = (char*)calloc(1,inl+1); if (dynNameSpace==NULL) { perror(exeName); exit(1); } strcpy(dynNameSpace, input_filename); outl = inl; if (inl>4) while ((outl >= inl-4) && (input_filename[outl] == extension[outl-inl+4])) dynNameSpace[outl--]=0; if (outl != inl-5) { DISPLAYLEVEL(1, "Cannot determine an output filename\n"); badusage(exeName); } output_filename = dynNameSpace; DISPLAYLEVEL(2, "Decoding file %s \n", output_filename); } break; } /* Check if output is defined as console; trigger an error in this case */ if (!output_filename) output_filename = "*\\dummy^!//"; if (!strcmp(output_filename,stdoutmark) && IS_CONSOLE(stdout) && !forceStdout) { DISPLAYLEVEL(1, "refusing to write to console without -c\n"); exit(1); } /* Downgrade notification level in stdout and multiple file mode */ if (!strcmp(output_filename,stdoutmark) && (displayLevel==2)) displayLevel=1; if ((multiple_inputs) && (displayLevel==2)) displayLevel=1; /* IO Stream/File */ LZ4IO_setNotificationLevel(displayLevel); if (ifnIdx == 0) multiple_inputs = 0; if (mode == om_decompress) { if (multiple_inputs) operationResult = LZ4IO_decompressMultipleFilenames(inFileNames, ifnIdx, !strcmp(output_filename,stdoutmark) ? stdoutmark : LZ4_EXTENSION); else operationResult = DEFAULT_DECOMPRESSOR(input_filename, output_filename); } else { /* compression is default action */ if (legacy_format) { DISPLAYLEVEL(3, "! Generating LZ4 Legacy format (deprecated) ! \n"); LZ4IO_compressFilename_Legacy(input_filename, output_filename, cLevel); } else { if (multiple_inputs) operationResult = LZ4IO_compressMultipleFilenames(inFileNames, ifnIdx, LZ4_EXTENSION, cLevel); else operationResult = DEFAULT_COMPRESSOR(input_filename, output_filename, cLevel); } } _cleanup: if (main_pause) waitEnter(); free(dynNameSpace); #ifdef UTIL_HAS_CREATEFILELIST if (extendedFileList) { UTIL_freeFileList(extendedFileList, fileNamesBuf); inFileNames = NULL; } #endif free((void*)inFileNames); return operationResult; }
31,470
717
jart/cosmopolitan
false
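Finally, the K/M suffix handling documented at readU32FromChar() above is easy to misread. The standalone sketch below re-implements just that parsing rule with hypothetical inputs; it is not exported by lz4cli.c, which keeps the function static.

/* Sketch: digits, then optional K or M (each step multiplies by 1024),
 * then optional 'i' and 'B', mirroring readU32FromChar() above. */
#include "libc/stdio/stdio.h"

static unsigned parseU32(const char** p) {
    unsigned r = 0;
    while (**p >= '0' && **p <= '9') r = r * 10 + (unsigned)(*(*p)++ - '0');
    if (**p == 'K' || **p == 'M') {
        r <<= 10;
        if (**p == 'M') r <<= 10;
        (*p)++;
        if (**p == 'i') (*p)++;
        if (**p == 'B') (*p)++;
    }
    return r;
}

int main(void) {
    const char* s = "4MiB";
    printf("%u\n", parseU32(&s));  /* prints 4194304 */
    return 0;
}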