| /*===---- avx512vlintrin.h - AVX512VL intrinsics ---------------------------=== |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a copy |
| * of this software and associated documentation files (the "Software"), to deal |
| * in the Software without restriction, including without limitation the rights |
| * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
| * copies of the Software, and to permit persons to whom the Software is |
| * furnished to do so, subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice shall be included in |
| * all copies or substantial portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
| * THE SOFTWARE. |
| * |
| *===-----------------------------------------------------------------------=== |
| */ |
| |
| #ifndef __IMMINTRIN_H |
| #error "Never use <avx512vlintrin.h> directly; include <immintrin.h> instead." |
| #endif |
| |
| #ifndef __AVX512VLINTRIN_H |
| #define __AVX512VLINTRIN_H |
| |
| #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"))) |
| |
/* Doesn't require avx512vl; also used in avx512dqintrin.h. */
| static __inline __m128i __attribute__((__always_inline__, __nodebug__, __target__("avx512f"))) |
| _mm_setzero_di(void) { |
| return (__m128i)(__v2di){ 0LL, 0LL}; |
| } |
| |
| /* Integer compare */ |
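/* Convenience wrappers: each macro below expands to the corresponding
 * _mm_cmp_*_mask / _mm256_cmp_*_mask macro with a fixed _MM_CMPINT_*
 * predicate and yields a bitmask with one bit per element, e.g.
 *
 *   __mmask8 k = _mm_cmplt_epi32_mask(a, b); // bit i set when a[i] < b[i]
 */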
| |
| #define _mm_cmpeq_epi32_mask(A, B) \ |
| _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ) |
| #define _mm_mask_cmpeq_epi32_mask(k, A, B) \ |
| _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ) |
| #define _mm_cmpge_epi32_mask(A, B) \ |
| _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GE) |
| #define _mm_mask_cmpge_epi32_mask(k, A, B) \ |
| _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE) |
| #define _mm_cmpgt_epi32_mask(A, B) \ |
| _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GT) |
| #define _mm_mask_cmpgt_epi32_mask(k, A, B) \ |
| _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT) |
| #define _mm_cmple_epi32_mask(A, B) \ |
| _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LE) |
| #define _mm_mask_cmple_epi32_mask(k, A, B) \ |
| _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE) |
| #define _mm_cmplt_epi32_mask(A, B) \ |
| _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LT) |
| #define _mm_mask_cmplt_epi32_mask(k, A, B) \ |
| _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT) |
| #define _mm_cmpneq_epi32_mask(A, B) \ |
| _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_NE) |
| #define _mm_mask_cmpneq_epi32_mask(k, A, B) \ |
| _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE) |
| |
| #define _mm256_cmpeq_epi32_mask(A, B) \ |
| _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ) |
| #define _mm256_mask_cmpeq_epi32_mask(k, A, B) \ |
| _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ) |
| #define _mm256_cmpge_epi32_mask(A, B) \ |
| _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GE) |
| #define _mm256_mask_cmpge_epi32_mask(k, A, B) \ |
| _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE) |
| #define _mm256_cmpgt_epi32_mask(A, B) \ |
| _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GT) |
| #define _mm256_mask_cmpgt_epi32_mask(k, A, B) \ |
| _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT) |
| #define _mm256_cmple_epi32_mask(A, B) \ |
| _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LE) |
| #define _mm256_mask_cmple_epi32_mask(k, A, B) \ |
| _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE) |
| #define _mm256_cmplt_epi32_mask(A, B) \ |
| _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LT) |
| #define _mm256_mask_cmplt_epi32_mask(k, A, B) \ |
| _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT) |
| #define _mm256_cmpneq_epi32_mask(A, B) \ |
| _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_NE) |
| #define _mm256_mask_cmpneq_epi32_mask(k, A, B) \ |
| _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE) |
| |
| #define _mm_cmpeq_epu32_mask(A, B) \ |
| _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ) |
| #define _mm_mask_cmpeq_epu32_mask(k, A, B) \ |
| _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ) |
| #define _mm_cmpge_epu32_mask(A, B) \ |
| _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GE) |
| #define _mm_mask_cmpge_epu32_mask(k, A, B) \ |
| _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE) |
| #define _mm_cmpgt_epu32_mask(A, B) \ |
| _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GT) |
| #define _mm_mask_cmpgt_epu32_mask(k, A, B) \ |
| _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT) |
| #define _mm_cmple_epu32_mask(A, B) \ |
| _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LE) |
| #define _mm_mask_cmple_epu32_mask(k, A, B) \ |
| _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE) |
| #define _mm_cmplt_epu32_mask(A, B) \ |
| _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LT) |
| #define _mm_mask_cmplt_epu32_mask(k, A, B) \ |
| _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT) |
| #define _mm_cmpneq_epu32_mask(A, B) \ |
| _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_NE) |
| #define _mm_mask_cmpneq_epu32_mask(k, A, B) \ |
| _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE) |
| |
| #define _mm256_cmpeq_epu32_mask(A, B) \ |
| _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ) |
| #define _mm256_mask_cmpeq_epu32_mask(k, A, B) \ |
| _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ) |
| #define _mm256_cmpge_epu32_mask(A, B) \ |
| _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GE) |
| #define _mm256_mask_cmpge_epu32_mask(k, A, B) \ |
| _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE) |
| #define _mm256_cmpgt_epu32_mask(A, B) \ |
| _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GT) |
| #define _mm256_mask_cmpgt_epu32_mask(k, A, B) \ |
| _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT) |
| #define _mm256_cmple_epu32_mask(A, B) \ |
| _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LE) |
| #define _mm256_mask_cmple_epu32_mask(k, A, B) \ |
| _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE) |
| #define _mm256_cmplt_epu32_mask(A, B) \ |
| _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LT) |
| #define _mm256_mask_cmplt_epu32_mask(k, A, B) \ |
| _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT) |
| #define _mm256_cmpneq_epu32_mask(A, B) \ |
| _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_NE) |
| #define _mm256_mask_cmpneq_epu32_mask(k, A, B) \ |
| _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE) |
| |
| #define _mm_cmpeq_epi64_mask(A, B) \ |
| _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ) |
| #define _mm_mask_cmpeq_epi64_mask(k, A, B) \ |
| _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ) |
| #define _mm_cmpge_epi64_mask(A, B) \ |
| _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GE) |
| #define _mm_mask_cmpge_epi64_mask(k, A, B) \ |
| _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE) |
| #define _mm_cmpgt_epi64_mask(A, B) \ |
| _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GT) |
| #define _mm_mask_cmpgt_epi64_mask(k, A, B) \ |
| _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT) |
| #define _mm_cmple_epi64_mask(A, B) \ |
| _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LE) |
| #define _mm_mask_cmple_epi64_mask(k, A, B) \ |
| _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE) |
| #define _mm_cmplt_epi64_mask(A, B) \ |
| _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LT) |
| #define _mm_mask_cmplt_epi64_mask(k, A, B) \ |
| _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT) |
| #define _mm_cmpneq_epi64_mask(A, B) \ |
| _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_NE) |
| #define _mm_mask_cmpneq_epi64_mask(k, A, B) \ |
| _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE) |
| |
| #define _mm256_cmpeq_epi64_mask(A, B) \ |
| _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ) |
| #define _mm256_mask_cmpeq_epi64_mask(k, A, B) \ |
| _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ) |
| #define _mm256_cmpge_epi64_mask(A, B) \ |
| _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GE) |
| #define _mm256_mask_cmpge_epi64_mask(k, A, B) \ |
| _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE) |
| #define _mm256_cmpgt_epi64_mask(A, B) \ |
| _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GT) |
| #define _mm256_mask_cmpgt_epi64_mask(k, A, B) \ |
| _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT) |
| #define _mm256_cmple_epi64_mask(A, B) \ |
| _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LE) |
| #define _mm256_mask_cmple_epi64_mask(k, A, B) \ |
| _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE) |
| #define _mm256_cmplt_epi64_mask(A, B) \ |
| _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LT) |
| #define _mm256_mask_cmplt_epi64_mask(k, A, B) \ |
| _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT) |
| #define _mm256_cmpneq_epi64_mask(A, B) \ |
| _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_NE) |
| #define _mm256_mask_cmpneq_epi64_mask(k, A, B) \ |
| _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE) |
| |
| #define _mm_cmpeq_epu64_mask(A, B) \ |
| _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ) |
| #define _mm_mask_cmpeq_epu64_mask(k, A, B) \ |
| _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ) |
| #define _mm_cmpge_epu64_mask(A, B) \ |
| _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GE) |
| #define _mm_mask_cmpge_epu64_mask(k, A, B) \ |
| _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE) |
| #define _mm_cmpgt_epu64_mask(A, B) \ |
| _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GT) |
| #define _mm_mask_cmpgt_epu64_mask(k, A, B) \ |
| _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT) |
| #define _mm_cmple_epu64_mask(A, B) \ |
| _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LE) |
| #define _mm_mask_cmple_epu64_mask(k, A, B) \ |
| _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE) |
| #define _mm_cmplt_epu64_mask(A, B) \ |
| _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LT) |
| #define _mm_mask_cmplt_epu64_mask(k, A, B) \ |
| _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT) |
| #define _mm_cmpneq_epu64_mask(A, B) \ |
| _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_NE) |
| #define _mm_mask_cmpneq_epu64_mask(k, A, B) \ |
| _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE) |
| |
| #define _mm256_cmpeq_epu64_mask(A, B) \ |
| _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ) |
| #define _mm256_mask_cmpeq_epu64_mask(k, A, B) \ |
| _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ) |
| #define _mm256_cmpge_epu64_mask(A, B) \ |
| _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GE) |
| #define _mm256_mask_cmpge_epu64_mask(k, A, B) \ |
| _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE) |
| #define _mm256_cmpgt_epu64_mask(A, B) \ |
| _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GT) |
| #define _mm256_mask_cmpgt_epu64_mask(k, A, B) \ |
| _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT) |
| #define _mm256_cmple_epu64_mask(A, B) \ |
| _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LE) |
| #define _mm256_mask_cmple_epu64_mask(k, A, B) \ |
| _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE) |
| #define _mm256_cmplt_epu64_mask(A, B) \ |
| _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LT) |
| #define _mm256_mask_cmplt_epu64_mask(k, A, B) \ |
| _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT) |
| #define _mm256_cmpneq_epu64_mask(A, B) \ |
| _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_NE) |
| #define _mm256_mask_cmpneq_epu64_mask(k, A, B) \ |
| _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE) |
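
/* Masked integer arithmetic. __builtin_ia32_select{d,q}_{128,256}(U, X, W)
 * returns element i of X when bit i of U is set and element i of W
 * otherwise, so the _mask_ variants merge inactive elements from __W and
 * the _maskz_ variants zero them. */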
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_add_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, |
| (__v8si)_mm256_add_epi32(__A, __B), |
| (__v8si)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_add_epi32(__mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, |
| (__v8si)_mm256_add_epi32(__A, __B), |
| (__v8si)_mm256_setzero_si256()); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_add_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, |
| (__v4di)_mm256_add_epi64(__A, __B), |
| (__v4di)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_add_epi64(__mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, |
| (__v4di)_mm256_add_epi64(__A, __B), |
| (__v4di)_mm256_setzero_si256()); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_sub_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, |
| (__v8si)_mm256_sub_epi32(__A, __B), |
| (__v8si)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_sub_epi32(__mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, |
| (__v8si)_mm256_sub_epi32(__A, __B), |
| (__v8si)_mm256_setzero_si256()); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_sub_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, |
| (__v4di)_mm256_sub_epi64(__A, __B), |
| (__v4di)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_sub_epi64(__mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, |
| (__v4di)_mm256_sub_epi64(__A, __B), |
| (__v4di)_mm256_setzero_si256()); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_add_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, |
| (__v4si)_mm_add_epi32(__A, __B), |
| (__v4si)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_add_epi32(__mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, |
| (__v4si)_mm_add_epi32(__A, __B), |
| (__v4si)_mm_setzero_si128()); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_add_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, |
| (__v2di)_mm_add_epi64(__A, __B), |
| (__v2di)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_add_epi64(__mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, |
| (__v2di)_mm_add_epi64(__A, __B), |
| (__v2di)_mm_setzero_si128()); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_sub_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, |
| (__v4si)_mm_sub_epi32(__A, __B), |
| (__v4si)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_sub_epi32(__mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, |
| (__v4si)_mm_sub_epi32(__A, __B), |
| (__v4si)_mm_setzero_si128()); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_sub_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, |
| (__v2di)_mm_sub_epi64(__A, __B), |
| (__v2di)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_sub_epi64(__mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, |
| (__v2di)_mm_sub_epi64(__A, __B), |
| (__v2di)_mm_setzero_si128()); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_mul_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) |
| { |
| return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, |
| (__v4di)_mm256_mul_epi32(__X, __Y), |
| (__v4di)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_mul_epi32(__mmask8 __M, __m256i __X, __m256i __Y) |
| { |
| return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, |
| (__v4di)_mm256_mul_epi32(__X, __Y), |
| (__v4di)_mm256_setzero_si256()); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_mul_epi32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) |
| { |
| return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, |
| (__v2di)_mm_mul_epi32(__X, __Y), |
| (__v2di)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_mul_epi32(__mmask8 __M, __m128i __X, __m128i __Y) |
| { |
| return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, |
| (__v2di)_mm_mul_epi32(__X, __Y), |
| (__v2di)_mm_setzero_si128()); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_mul_epu32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) |
| { |
| return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, |
| (__v4di)_mm256_mul_epu32(__X, __Y), |
| (__v4di)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_mul_epu32(__mmask8 __M, __m256i __X, __m256i __Y) |
| { |
| return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, |
| (__v4di)_mm256_mul_epu32(__X, __Y), |
| (__v4di)_mm256_setzero_si256()); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_mul_epu32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) |
| { |
| return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, |
| (__v2di)_mm_mul_epu32(__X, __Y), |
| (__v2di)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_mul_epu32(__mmask8 __M, __m128i __X, __m128i __Y) |
| { |
| return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, |
| (__v2di)_mm_mul_epu32(__X, __Y), |
| (__v2di)_mm_setzero_si128()); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_mullo_epi32(__mmask8 __M, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, |
| (__v8si)_mm256_mullo_epi32(__A, __B), |
| (__v8si)_mm256_setzero_si256()); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_mullo_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, |
| (__v8si)_mm256_mullo_epi32(__A, __B), |
| (__v8si)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_mullo_epi32(__mmask8 __M, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, |
| (__v4si)_mm_mullo_epi32(__A, __B), |
| (__v4si)_mm_setzero_si128()); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, |
| (__v4si)_mm_mullo_epi32(__A, __B), |
| (__v4si)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, |
| (__v8si)_mm256_and_si256(__A, __B), |
| (__v8si)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)_mm256_mask_and_epi32(_mm256_setzero_si256(), __U, __A, __B); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, |
| (__v4si)_mm_and_si128(__A, __B), |
| (__v4si)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)_mm_mask_and_epi32(_mm_setzero_si128(), __U, __A, __B); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, |
| (__v8si)_mm256_andnot_si256(__A, __B), |
| (__v8si)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)_mm256_mask_andnot_epi32(_mm256_setzero_si256(), |
| __U, __A, __B); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, |
| (__v4si)_mm_andnot_si128(__A, __B), |
| (__v4si)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
_mm_maskz_andnot_epi32(__mmask8 __U, __m128i __A, __m128i __B)
| { |
| return (__m128i)_mm_mask_andnot_epi32(_mm_setzero_si128(), __U, __A, __B); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
_mm256_mask_or_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
| { |
| return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, |
| (__v8si)_mm256_or_si256(__A, __B), |
| (__v8si)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)_mm256_mask_or_epi32(_mm256_setzero_si256(), __U, __A, __B); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, |
| (__v4si)_mm_or_si128(__A, __B), |
| (__v4si)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)_mm_mask_or_epi32(_mm_setzero_si128(), __U, __A, __B); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, |
| (__v8si)_mm256_xor_si256(__A, __B), |
| (__v8si)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)_mm256_mask_xor_epi32(_mm256_setzero_si256(), __U, __A, __B); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
_mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
| { |
| return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, |
| (__v4si)_mm_xor_si128(__A, __B), |
| (__v4si)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)_mm_mask_xor_epi32(_mm_setzero_si128(), __U, __A, __B); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, |
| (__v4di)_mm256_and_si256(__A, __B), |
| (__v4di)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_and_epi64(__mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)_mm256_mask_and_epi64(_mm256_setzero_si256(), __U, __A, __B); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, |
| (__v2di)_mm_and_si128(__A, __B), |
| (__v2di)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)_mm_mask_and_epi64(_mm_setzero_si128(), __U, __A, __B); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, |
| (__v4di)_mm256_andnot_si256(__A, __B), |
| (__v4di)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)_mm256_mask_andnot_epi64(_mm256_setzero_si256(), |
| __U, __A, __B); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, |
| (__v2di)_mm_andnot_si128(__A, __B), |
| (__v2di)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)_mm_mask_andnot_epi64(_mm_setzero_si128(), __U, __A, __B); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, |
| (__v4di)_mm256_or_si256(__A, __B), |
| (__v4di)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_or_epi64(__mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)_mm256_mask_or_epi64(_mm256_setzero_si256(), __U, __A, __B); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, |
| (__v2di)_mm_or_si128(__A, __B), |
| (__v2di)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)_mm_mask_or_epi64(_mm_setzero_si128(), __U, __A, __B); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, |
| (__v4di)_mm256_xor_si256(__A, __B), |
| (__v4di)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B) |
| { |
| return (__m256i)_mm256_mask_xor_epi64(_mm256_setzero_si256(), __U, __A, __B); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
_mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
| { |
| return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, |
| (__v2di)_mm_xor_si128(__A, __B), |
| (__v2di)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B) |
| { |
| return (__m128i)_mm_mask_xor_epi64(_mm_setzero_si128(), __U, __A, __B); |
| } |
| |
| #define _mm_cmp_epi32_mask(a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \ |
| (__v4si)(__m128i)(b), (int)(p), \ |
| (__mmask8)-1); }) |
| |
| #define _mm_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \ |
| (__v4si)(__m128i)(b), (int)(p), \ |
| (__mmask8)(m)); }) |
| |
| #define _mm_cmp_epu32_mask(a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \ |
| (__v4si)(__m128i)(b), (int)(p), \ |
| (__mmask8)-1); }) |
| |
| #define _mm_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \ |
| (__v4si)(__m128i)(b), (int)(p), \ |
| (__mmask8)(m)); }) |
| |
| #define _mm256_cmp_epi32_mask(a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \ |
| (__v8si)(__m256i)(b), (int)(p), \ |
| (__mmask8)-1); }) |
| |
| #define _mm256_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \ |
| (__v8si)(__m256i)(b), (int)(p), \ |
| (__mmask8)(m)); }) |
| |
| #define _mm256_cmp_epu32_mask(a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \ |
| (__v8si)(__m256i)(b), (int)(p), \ |
| (__mmask8)-1); }) |
| |
| #define _mm256_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \ |
| (__v8si)(__m256i)(b), (int)(p), \ |
| (__mmask8)(m)); }) |
| |
| #define _mm_cmp_epi64_mask(a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \ |
| (__v2di)(__m128i)(b), (int)(p), \ |
| (__mmask8)-1); }) |
| |
| #define _mm_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \ |
| (__v2di)(__m128i)(b), (int)(p), \ |
| (__mmask8)(m)); }) |
| |
| #define _mm_cmp_epu64_mask(a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \ |
| (__v2di)(__m128i)(b), (int)(p), \ |
| (__mmask8)-1); }) |
| |
| #define _mm_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \ |
| (__v2di)(__m128i)(b), (int)(p), \ |
| (__mmask8)(m)); }) |
| |
| #define _mm256_cmp_epi64_mask(a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \ |
| (__v4di)(__m256i)(b), (int)(p), \ |
| (__mmask8)-1); }) |
| |
| #define _mm256_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \ |
| (__v4di)(__m256i)(b), (int)(p), \ |
| (__mmask8)(m)); }) |
| |
| #define _mm256_cmp_epu64_mask(a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \ |
| (__v4di)(__m256i)(b), (int)(p), \ |
| (__mmask8)-1); }) |
| |
| #define _mm256_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \ |
| (__v4di)(__m256i)(b), (int)(p), \ |
| (__mmask8)(m)); }) |
| |
| #define _mm256_cmp_ps_mask(a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \ |
| (__v8sf)(__m256)(b), (int)(p), \ |
| (__mmask8)-1); }) |
| |
| #define _mm256_mask_cmp_ps_mask(m, a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \ |
| (__v8sf)(__m256)(b), (int)(p), \ |
| (__mmask8)(m)); }) |
| |
| #define _mm256_cmp_pd_mask(a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \ |
| (__v4df)(__m256d)(b), (int)(p), \ |
| (__mmask8)-1); }) |
| |
| #define _mm256_mask_cmp_pd_mask(m, a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \ |
| (__v4df)(__m256d)(b), (int)(p), \ |
| (__mmask8)(m)); }) |
| |
| #define _mm_cmp_ps_mask(a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \ |
| (__v4sf)(__m128)(b), (int)(p), \ |
| (__mmask8)-1); }) |
| |
| #define _mm_mask_cmp_ps_mask(m, a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \ |
| (__v4sf)(__m128)(b), (int)(p), \ |
| (__mmask8)(m)); }) |
| |
| #define _mm_cmp_pd_mask(a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \ |
| (__v2df)(__m128d)(b), (int)(p), \ |
| (__mmask8)-1); }) |
| |
| #define _mm_mask_cmp_pd_mask(m, a, b, p) __extension__ ({ \ |
| (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \ |
| (__v2df)(__m128d)(b), (int)(p), \ |
| (__mmask8)(m)); }) |
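
/* Masked fused multiply-add. Inactive elements come from the first source
 * (__A) in the _mask_ forms and from the addend (__C) in the _mask3_
 * forms; the _maskz_ forms zero them. Several fmsub/fnmadd/fnmsub
 * variants are expressed by negating operands of the vfmadd builtins. */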
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) |
| { |
| return (__m128d) __builtin_ia32_vfmaddpd128_mask ((__v2df) __A, |
| (__v2df) __B, |
| (__v2df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) |
| { |
| return (__m128d) __builtin_ia32_vfmaddpd128_mask3 ((__v2df) __A, |
| (__v2df) __B, |
| (__v2df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) |
| { |
| return (__m128d) __builtin_ia32_vfmaddpd128_maskz ((__v2df) __A, |
| (__v2df) __B, |
| (__v2df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) |
| { |
| return (__m128d) __builtin_ia32_vfmaddpd128_mask ((__v2df) __A, |
| (__v2df) __B, |
| -(__v2df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) |
| { |
| return (__m128d) __builtin_ia32_vfmaddpd128_maskz ((__v2df) __A, |
| (__v2df) __B, |
| -(__v2df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) |
| { |
| return (__m128d) __builtin_ia32_vfmaddpd128_mask3 (-(__v2df) __A, |
| (__v2df) __B, |
| (__v2df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) |
| { |
| return (__m128d) __builtin_ia32_vfmaddpd128_maskz (-(__v2df) __A, |
| (__v2df) __B, |
| (__v2df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) |
| { |
| return (__m128d) __builtin_ia32_vfmaddpd128_maskz (-(__v2df) __A, |
| (__v2df) __B, |
| -(__v2df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) |
| { |
| return (__m256d) __builtin_ia32_vfmaddpd256_mask ((__v4df) __A, |
| (__v4df) __B, |
| (__v4df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) |
| { |
| return (__m256d) __builtin_ia32_vfmaddpd256_mask3 ((__v4df) __A, |
| (__v4df) __B, |
| (__v4df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) |
| { |
| return (__m256d) __builtin_ia32_vfmaddpd256_maskz ((__v4df) __A, |
| (__v4df) __B, |
| (__v4df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) |
| { |
| return (__m256d) __builtin_ia32_vfmaddpd256_mask ((__v4df) __A, |
| (__v4df) __B, |
| -(__v4df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) |
| { |
| return (__m256d) __builtin_ia32_vfmaddpd256_maskz ((__v4df) __A, |
| (__v4df) __B, |
| -(__v4df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) |
| { |
| return (__m256d) __builtin_ia32_vfmaddpd256_mask3 (-(__v4df) __A, |
| (__v4df) __B, |
| (__v4df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) |
| { |
| return (__m256d) __builtin_ia32_vfmaddpd256_maskz (-(__v4df) __A, |
| (__v4df) __B, |
| (__v4df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) |
| { |
| return (__m256d) __builtin_ia32_vfmaddpd256_maskz (-(__v4df) __A, |
| (__v4df) __B, |
| -(__v4df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) |
| { |
| return (__m128) __builtin_ia32_vfmaddps128_mask ((__v4sf) __A, |
| (__v4sf) __B, |
| (__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) |
| { |
| return (__m128) __builtin_ia32_vfmaddps128_mask3 ((__v4sf) __A, |
| (__v4sf) __B, |
| (__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) |
| { |
| return (__m128) __builtin_ia32_vfmaddps128_maskz ((__v4sf) __A, |
| (__v4sf) __B, |
| (__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) |
| { |
| return (__m128) __builtin_ia32_vfmaddps128_mask ((__v4sf) __A, |
| (__v4sf) __B, |
| -(__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) |
| { |
| return (__m128) __builtin_ia32_vfmaddps128_maskz ((__v4sf) __A, |
| (__v4sf) __B, |
| -(__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) |
| { |
| return (__m128) __builtin_ia32_vfmaddps128_mask3 (-(__v4sf) __A, |
| (__v4sf) __B, |
| (__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) |
| { |
| return (__m128) __builtin_ia32_vfmaddps128_maskz (-(__v4sf) __A, |
| (__v4sf) __B, |
| (__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) |
| { |
| return (__m128) __builtin_ia32_vfmaddps128_maskz (-(__v4sf) __A, |
| (__v4sf) __B, |
| -(__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) |
| { |
| return (__m256) __builtin_ia32_vfmaddps256_mask ((__v8sf) __A, |
| (__v8sf) __B, |
| (__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) |
| { |
| return (__m256) __builtin_ia32_vfmaddps256_mask3 ((__v8sf) __A, |
| (__v8sf) __B, |
| (__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) |
| { |
| return (__m256) __builtin_ia32_vfmaddps256_maskz ((__v8sf) __A, |
| (__v8sf) __B, |
| (__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) |
| { |
| return (__m256) __builtin_ia32_vfmaddps256_mask ((__v8sf) __A, |
| (__v8sf) __B, |
| -(__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) |
| { |
| return (__m256) __builtin_ia32_vfmaddps256_maskz ((__v8sf) __A, |
| (__v8sf) __B, |
| -(__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) |
| { |
| return (__m256) __builtin_ia32_vfmaddps256_mask3 (-(__v8sf) __A, |
| (__v8sf) __B, |
| (__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) |
| { |
| return (__m256) __builtin_ia32_vfmaddps256_maskz (-(__v8sf) __A, |
| (__v8sf) __B, |
| (__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) |
| { |
| return (__m256) __builtin_ia32_vfmaddps256_maskz (-(__v8sf) __A, |
| (__v8sf) __B, |
| -(__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) |
| { |
| return (__m128d) __builtin_ia32_vfmaddsubpd128_mask ((__v2df) __A, |
| (__v2df) __B, |
| (__v2df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) |
| { |
| return (__m128d) __builtin_ia32_vfmaddsubpd128_mask3 ((__v2df) __A, |
| (__v2df) __B, |
| (__v2df) __C, |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) |
| { |
| return (__m128d) __builtin_ia32_vfmaddsubpd128_maskz ((__v2df) __A, |
| (__v2df) __B, |
| (__v2df) __C, |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) |
| { |
| return (__m128d) __builtin_ia32_vfmaddsubpd128_mask ((__v2df) __A, |
| (__v2df) __B, |
| -(__v2df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) |
| { |
| return (__m128d) __builtin_ia32_vfmaddsubpd128_maskz ((__v2df) __A, |
| (__v2df) __B, |
| -(__v2df) __C, |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) |
| { |
| return (__m256d) __builtin_ia32_vfmaddsubpd256_mask ((__v4df) __A, |
| (__v4df) __B, |
| (__v4df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) |
| { |
| return (__m256d) __builtin_ia32_vfmaddsubpd256_mask3 ((__v4df) __A, |
| (__v4df) __B, |
| (__v4df) __C, |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) |
| { |
| return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz ((__v4df) __A, |
| (__v4df) __B, |
| (__v4df) __C, |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) |
| { |
| return (__m256d) __builtin_ia32_vfmaddsubpd256_mask ((__v4df) __A, |
| (__v4df) __B, |
| -(__v4df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) |
| { |
| return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz ((__v4df) __A, |
| (__v4df) __B, |
| -(__v4df) __C, |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) |
| { |
| return (__m128) __builtin_ia32_vfmaddsubps128_mask ((__v4sf) __A, |
| (__v4sf) __B, |
| (__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) |
| { |
| return (__m128) __builtin_ia32_vfmaddsubps128_mask3 ((__v4sf) __A, |
| (__v4sf) __B, |
| (__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) |
| { |
| return (__m128) __builtin_ia32_vfmaddsubps128_maskz ((__v4sf) __A, |
| (__v4sf) __B, |
| (__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) |
| { |
| return (__m128) __builtin_ia32_vfmaddsubps128_mask ((__v4sf) __A, |
| (__v4sf) __B, |
| -(__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) |
| { |
| return (__m128) __builtin_ia32_vfmaddsubps128_maskz ((__v4sf) __A, |
| (__v4sf) __B, |
| -(__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
_mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
| { |
| return (__m256) __builtin_ia32_vfmaddsubps256_mask ((__v8sf) __A, |
| (__v8sf) __B, |
| (__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) |
| { |
| return (__m256) __builtin_ia32_vfmaddsubps256_mask3 ((__v8sf) __A, |
| (__v8sf) __B, |
| (__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) |
| { |
| return (__m256) __builtin_ia32_vfmaddsubps256_maskz ((__v8sf) __A, |
| (__v8sf) __B, |
| (__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) |
| { |
| return (__m256) __builtin_ia32_vfmaddsubps256_mask ((__v8sf) __A, |
| (__v8sf) __B, |
| -(__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) |
| { |
| return (__m256) __builtin_ia32_vfmaddsubps256_maskz ((__v8sf) __A, |
| (__v8sf) __B, |
| -(__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) |
| { |
| return (__m128d) __builtin_ia32_vfmsubpd128_mask3 ((__v2df) __A, |
| (__v2df) __B, |
| (__v2df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) |
| { |
| return (__m256d) __builtin_ia32_vfmsubpd256_mask3 ((__v4df) __A, |
| (__v4df) __B, |
| (__v4df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) |
| { |
| return (__m128) __builtin_ia32_vfmsubps128_mask3 ((__v4sf) __A, |
| (__v4sf) __B, |
| (__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) |
| { |
| return (__m256) __builtin_ia32_vfmsubps256_mask3 ((__v8sf) __A, |
| (__v8sf) __B, |
| (__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) |
| { |
| return (__m128d) __builtin_ia32_vfmsubaddpd128_mask3 ((__v2df) __A, |
| (__v2df) __B, |
| (__v2df) __C, |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) |
| { |
| return (__m256d) __builtin_ia32_vfmsubaddpd256_mask3 ((__v4df) __A, |
| (__v4df) __B, |
| (__v4df) __C, |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) |
| { |
| return (__m128) __builtin_ia32_vfmsubaddps128_mask3 ((__v4sf) __A, |
| (__v4sf) __B, |
| (__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) |
| { |
| return (__m256) __builtin_ia32_vfmsubaddps256_mask3 ((__v8sf) __A, |
| (__v8sf) __B, |
| (__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) |
| { |
| return (__m128d) __builtin_ia32_vfnmaddpd128_mask ((__v2df) __A, |
| (__v2df) __B, |
| (__v2df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) |
| { |
| return (__m256d) __builtin_ia32_vfnmaddpd256_mask ((__v4df) __A, |
| (__v4df) __B, |
| (__v4df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) |
| { |
| return (__m128) __builtin_ia32_vfnmaddps128_mask ((__v4sf) __A, |
| (__v4sf) __B, |
| (__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) |
| { |
| return (__m256) __builtin_ia32_vfnmaddps256_mask ((__v8sf) __A, |
| (__v8sf) __B, |
| (__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) |
| { |
| return (__m128d) __builtin_ia32_vfnmsubpd128_mask ((__v2df) __A, |
| (__v2df) __B, |
| (__v2df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) |
| { |
| return (__m128d) __builtin_ia32_vfnmsubpd128_mask3 ((__v2df) __A, |
| (__v2df) __B, |
| (__v2df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) |
| { |
| return (__m256d) __builtin_ia32_vfnmsubpd256_mask ((__v4df) __A, |
| (__v4df) __B, |
| (__v4df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) |
| { |
| return (__m256d) __builtin_ia32_vfnmsubpd256_mask3 ((__v4df) __A, |
| (__v4df) __B, |
| (__v4df) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) |
| { |
| return (__m128) __builtin_ia32_vfnmsubps128_mask ((__v4sf) __A, |
| (__v4sf) __B, |
| (__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) |
| { |
| return (__m128) __builtin_ia32_vfnmsubps128_mask3 ((__v4sf) __A, |
| (__v4sf) __B, |
| (__v4sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) |
| { |
| return (__m256) __builtin_ia32_vfnmsubps256_mask ((__v8sf) __A, |
| (__v8sf) __B, |
| (__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) |
| { |
| return (__m256) __builtin_ia32_vfnmsubps256_mask3 ((__v8sf) __A, |
| (__v8sf) __B, |
| (__v8sf) __C, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { |
| return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, |
| (__v2df)_mm_add_pd(__A, __B), |
| (__v2df)__W); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B) { |
| return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, |
| (__v2df)_mm_add_pd(__A, __B), |
| (__v2df)_mm_setzero_pd()); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { |
| return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, |
| (__v4df)_mm256_add_pd(__A, __B), |
| (__v4df)__W); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B) { |
| return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, |
| (__v4df)_mm256_add_pd(__A, __B), |
| (__v4df)_mm256_setzero_pd()); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { |
| return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, |
| (__v4sf)_mm_add_ps(__A, __B), |
| (__v4sf)__W); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B) { |
| return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, |
| (__v4sf)_mm_add_ps(__A, __B), |
| (__v4sf)_mm_setzero_ps()); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { |
| return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, |
| (__v8sf)_mm256_add_ps(__A, __B), |
| (__v8sf)__W); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B) { |
| return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, |
| (__v8sf)_mm256_add_ps(__A, __B), |
| (__v8sf)_mm256_setzero_ps()); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_blend_epi32 (__mmask8 __U, __m128i __A, __m128i __W) { |
| return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U, |
| (__v4si) __W, |
| (__v4si) __A); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_blend_epi32 (__mmask8 __U, __m256i __A, __m256i __W) { |
| return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U, |
| (__v8si) __W, |
| (__v8si) __A); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_blend_pd (__mmask8 __U, __m128d __A, __m128d __W) { |
| return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U, |
| (__v2df) __W, |
| (__v2df) __A); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_blend_pd (__mmask8 __U, __m256d __A, __m256d __W) { |
| return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U, |
| (__v4df) __W, |
| (__v4df) __A); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_blend_ps (__mmask8 __U, __m128 __A, __m128 __W) { |
| return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U, |
| (__v4sf) __W, |
| (__v4sf) __A); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_blend_ps (__mmask8 __U, __m256 __A, __m256 __W) { |
| return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U, |
| (__v8sf) __W, |
| (__v8sf) __A); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_blend_epi64 (__mmask8 __U, __m128i __A, __m128i __W) { |
| return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U, |
| (__v2di) __W, |
| (__v2di) __A); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_blend_epi64 (__mmask8 __U, __m256i __A, __m256i __W) { |
| return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U, |
| (__v4di) __W, |
| (__v4di) __A); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_compress_pd (__m128d __W, __mmask8 __U, __m128d __A) { |
| return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A, |
| (__v2df) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_compress_pd (__mmask8 __U, __m128d __A) { |
| return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A, |
| (__v2df) |
| _mm_setzero_pd (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_compress_pd (__m256d __W, __mmask8 __U, __m256d __A) { |
| return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A, |
| (__v4df) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_compress_pd (__mmask8 __U, __m256d __A) { |
| return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A, |
| (__v4df) |
| _mm256_setzero_pd (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_compress_epi64 (__m128i __W, __mmask8 __U, __m128i __A) { |
| return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A, |
| (__v2di) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_compress_epi64 (__mmask8 __U, __m128i __A) { |
| return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A, |
| (__v2di) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_compress_epi64 (__m256i __W, __mmask8 __U, __m256i __A) { |
| return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A, |
| (__v4di) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_compress_epi64 (__mmask8 __U, __m256i __A) { |
| return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A, |
| (__v4di) |
| _mm256_setzero_si256 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_compress_ps (__m128 __W, __mmask8 __U, __m128 __A) { |
| return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A, |
| (__v4sf) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_compress_ps (__mmask8 __U, __m128 __A) { |
| return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A, |
| (__v4sf) |
| _mm_setzero_ps (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_compress_ps (__m256 __W, __mmask8 __U, __m256 __A) { |
| return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A, |
| (__v8sf) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_compress_ps (__mmask8 __U, __m256 __A) { |
| return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A, |
| (__v8sf) |
| _mm256_setzero_ps (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_compress_epi32 (__m128i __W, __mmask8 __U, __m128i __A) { |
| return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A, |
| (__v4si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_compress_epi32 (__mmask8 __U, __m128i __A) { |
| return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_compress_epi32 (__m256i __W, __mmask8 __U, __m256i __A) { |
| return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A, |
| (__v8si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_compress_epi32 (__mmask8 __U, __m256i __A) { |
| return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A, |
| (__v8si) |
| _mm256_setzero_si256 (), |
| (__mmask8) __U); |
| } |
| |
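| /* Compress-store: compress the elements of __A selected by __U and store |
| * them contiguously at the unaligned address __P; only the selected |
| * elements are written. A minimal sketch (editor's addition; buf and v |
| * are hypothetical): |
| * |
| * double buf[4] = {0}; |
| * _mm256_mask_compressstoreu_pd(buf, 0x5, v); // buf[0] = v[0], buf[1] = v[2] |
| */ |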
| static __inline__ void __DEFAULT_FN_ATTRS |
| _mm_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m128d __A) { |
| __builtin_ia32_compressstoredf128_mask ((__v2df *) __P, |
| (__v2df) __A, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ void __DEFAULT_FN_ATTRS |
| _mm256_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m256d __A) { |
| __builtin_ia32_compressstoredf256_mask ((__v4df *) __P, |
| (__v4df) __A, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ void __DEFAULT_FN_ATTRS |
| _mm_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m128i __A) { |
| __builtin_ia32_compressstoredi128_mask ((__v2di *) __P, |
| (__v2di) __A, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ void __DEFAULT_FN_ATTRS |
| _mm256_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m256i __A) { |
| __builtin_ia32_compressstoredi256_mask ((__v4di *) __P, |
| (__v4di) __A, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ void __DEFAULT_FN_ATTRS |
| _mm_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m128 __A) { |
| __builtin_ia32_compressstoresf128_mask ((__v4sf *) __P, |
| (__v4sf) __A, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ void __DEFAULT_FN_ATTRS |
| _mm256_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m256 __A) { |
| __builtin_ia32_compressstoresf256_mask ((__v8sf *) __P, |
| (__v8sf) __A, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ void __DEFAULT_FN_ATTRS |
| _mm_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m128i __A) { |
| __builtin_ia32_compressstoresi128_mask ((__v4si *) __P, |
| (__v4si) __A, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ void __DEFAULT_FN_ATTRS |
| _mm256_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m256i __A) { |
| __builtin_ia32_compressstoresi256_mask ((__v8si *) __P, |
| (__v8si) __A, |
| (__mmask8) __U); |
| } |
| |
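| /* Conversions. The masked cvtepi32_pd variants are composed from the plain |
| * AVX intrinsic plus a select builtin, while most of the conversions below |
| * go through dedicated masked builtins. */ |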
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A) { |
| return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, |
| (__v2df)_mm_cvtepi32_pd(__A), |
| (__v2df)__W); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) { |
| return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, |
| (__v2df)_mm_cvtepi32_pd(__A), |
| (__v2df)_mm_setzero_pd()); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A) { |
| return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, |
| (__v4df)_mm256_cvtepi32_pd(__A), |
| (__v4df)__W); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) { |
| return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, |
| (__v4df)_mm256_cvtepi32_pd(__A), |
| (__v4df)_mm256_setzero_pd()); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_cvtepi32_ps (__m128 __W, __mmask8 __U, __m128i __A) { |
| return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A, |
| (__v4sf) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_cvtepi32_ps (__mmask8 __U, __m128i __A) { |
| return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A, |
| (__v4sf) |
| _mm_setzero_ps (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_cvtepi32_ps (__m256 __W, __mmask8 __U, __m256i __A) { |
| return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A, |
| (__v8sf) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_cvtepi32_ps (__mmask8 __U, __m256i __A) { |
| return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A, |
| (__v8sf) |
| _mm256_setzero_ps (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) { |
| return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A, |
| (__v4si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_cvtpd_epi32 (__mmask8 __U, __m128d __A) { |
| return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm256_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) { |
| return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A, |
| (__v4si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm256_maskz_cvtpd_epi32 (__mmask8 __U, __m256d __A) { |
| return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m128d __A) { |
| return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A, |
| (__v4sf) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_cvtpd_ps (__mmask8 __U, __m128d __A) { |
| return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A, |
| (__v4sf) |
| _mm_setzero_ps (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm256_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m256d __A) { |
| return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A, |
| (__v4sf) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm256_maskz_cvtpd_ps (__mmask8 __U, __m256d __A) { |
| return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A, |
| (__v4sf) |
| _mm_setzero_ps (), |
| (__mmask8) __U); |
| } |
| |
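| /* The unsigned conversions (cvt*_epu32) have no SSE/AVX counterpart, so the |
| * unmasked forms are also defined here, passing an all-ones mask. */ |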
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_cvtpd_epu32 (__m128d __A) { |
| return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) -1); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) { |
| return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A, |
| (__v4si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_cvtpd_epu32 (__mmask8 __U, __m128d __A) { |
| return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm256_cvtpd_epu32 (__m256d __A) { |
| return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) -1); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm256_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) { |
| return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A, |
| (__v4si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm256_maskz_cvtpd_epu32 (__mmask8 __U, __m256d __A) { |
| return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_cvtps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) { |
| return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A, |
| (__v4si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_cvtps_epi32 (__mmask8 __U, __m128 __A) { |
| return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_cvtps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) { |
| return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A, |
| (__v8si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_cvtps_epi32 (__mmask8 __U, __m256 __A) { |
| return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A, |
| (__v8si) |
| _mm256_setzero_si256 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_cvtps_pd (__m128d __W, __mmask8 __U, __m128 __A) { |
| return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A, |
| (__v2df) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_cvtps_pd (__mmask8 __U, __m128 __A) { |
| return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A, |
| (__v2df) |
| _mm_setzero_pd (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_cvtps_pd (__m256d __W, __mmask8 __U, __m128 __A) { |
| return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A, |
| (__v4df) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_cvtps_pd (__mmask8 __U, __m128 __A) { |
| return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A, |
| (__v4df) |
| _mm256_setzero_pd (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_cvtps_epu32 (__m128 __A) { |
| return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) -1); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_cvtps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) { |
| return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A, |
| (__v4si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_cvtps_epu32 (__mmask8 __U, __m128 __A) { |
| return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_cvtps_epu32 (__m256 __A) { |
| return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A, |
| (__v8si) |
| _mm256_setzero_si256 (), |
| (__mmask8) -1); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_cvtps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) { |
| return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A, |
| (__v8si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_cvtps_epu32 (__mmask8 __U, __m256 __A) { |
| return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A, |
| (__v8si) |
| _mm256_setzero_si256 (), |
| (__mmask8) __U); |
| } |
| |
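| /* The cvtt* variants truncate (round toward zero) instead of using the |
| * current rounding mode. */ |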
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) { |
| return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A, |
| (__v4si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_cvttpd_epi32 (__mmask8 __U, __m128d __A) { |
| return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm256_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) { |
| return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A, |
| (__v4si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm256_maskz_cvttpd_epi32 (__mmask8 __U, __m256d __A) { |
| return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_cvttpd_epu32 (__m128d __A) { |
| return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) -1); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) { |
| return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A, |
| (__v4si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_cvttpd_epu32 (__mmask8 __U, __m128d __A) { |
| return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm256_cvttpd_epu32 (__m256d __A) { |
| return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) -1); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm256_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) { |
| return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A, |
| (__v4si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm256_maskz_cvttpd_epu32 (__mmask8 __U, __m256d __A) { |
| return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_cvttps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) { |
| return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A, |
| (__v4si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_cvttps_epi32 (__mmask8 __U, __m128 __A) { |
| return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_cvttps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) { |
| return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A, |
| (__v8si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_cvttps_epi32 (__mmask8 __U, __m256 __A) { |
| return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A, |
| (__v8si) |
| _mm256_setzero_si256 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_cvttps_epu32 (__m128 __A) { |
| return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) -1); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_cvttps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) { |
| return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A, |
| (__v4si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_cvttps_epu32 (__mmask8 __U, __m128 __A) { |
| return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_cvttps_epu32 (__m256 __A) { |
| return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A, |
| (__v8si) |
| _mm256_setzero_si256 (), |
| (__mmask8) -1); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_cvttps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) { |
| return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A, |
| (__v8si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A) { |
| return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A, |
| (__v8si) |
| _mm256_setzero_si256 (), |
| (__mmask8) __U); |
| } |
| |
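| /* cvtepu32_pd is written with generic vector operations: the shuffle keeps |
| * the two low unsigned 32-bit elements and __builtin_convertvector widens |
| * them to double. */ |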
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_cvtepu32_pd (__m128i __A) { |
| return (__m128d) __builtin_convertvector( |
| __builtin_shufflevector((__v4su)__A, (__v4su)__A, 0, 1), __v2df); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A) { |
| return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, |
| (__v2df)_mm_cvtepu32_pd(__A), |
| (__v2df)__W); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) { |
| return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, |
| (__v2df)_mm_cvtepu32_pd(__A), |
| (__v2df)_mm_setzero_pd()); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_cvtepu32_pd (__m128i __A) { |
| return (__m256d)__builtin_convertvector((__v4su)__A, __v4df); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A) { |
| return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, |
| (__v4df)_mm256_cvtepu32_pd(__A), |
| (__v4df)__W); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) { |
| return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, |
| (__v4df)_mm256_cvtepu32_pd(__A), |
| (__v4df)_mm256_setzero_pd()); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_cvtepu32_ps (__m128i __A) { |
| return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A, |
| (__v4sf) |
| _mm_setzero_ps (), |
| (__mmask8) -1); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_cvtepu32_ps (__m128 __W, __mmask8 __U, __m128i __A) { |
| return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A, |
| (__v4sf) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_cvtepu32_ps (__mmask8 __U, __m128i __A) { |
| return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A, |
| (__v4sf) |
| _mm_setzero_ps (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_cvtepu32_ps (__m256i __A) { |
| return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A, |
| (__v8sf) |
| _mm256_setzero_ps (), |
| (__mmask8) -1); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_cvtepu32_ps (__m256 __W, __mmask8 __U, __m256i __A) { |
| return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A, |
| (__v8sf) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_cvtepu32_ps (__mmask8 __U, __m256i __A) { |
| return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A, |
| (__v8sf) |
| _mm256_setzero_ps (), |
| (__mmask8) __U); |
| } |
| |
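| /* Masked arithmetic: div/max/min/mul below wrap the corresponding SSE/AVX |
| * intrinsic in a select on __U. Illustrative sketch (editor's addition; a |
| * and b are hypothetical): |
| * |
| * // r[i] = ((__U >> i) & 1) ? a[i] / b[i] : 0.0 |
| * __m128d r = _mm_maskz_div_pd(0x1, a, b); // { a[0]/b[0], 0.0 } |
| */ |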
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { |
| return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, |
| (__v2df)_mm_div_pd(__A, __B), |
| (__v2df)__W); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B) { |
| return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, |
| (__v2df)_mm_div_pd(__A, __B), |
| (__v2df)_mm_setzero_pd()); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { |
| return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, |
| (__v4df)_mm256_div_pd(__A, __B), |
| (__v4df)__W); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B) { |
| return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, |
| (__v4df)_mm256_div_pd(__A, __B), |
| (__v4df)_mm256_setzero_pd()); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { |
| return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, |
| (__v4sf)_mm_div_ps(__A, __B), |
| (__v4sf)__W); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B) { |
| return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, |
| (__v4sf)_mm_div_ps(__A, __B), |
| (__v4sf)_mm_setzero_ps()); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { |
| return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, |
| (__v8sf)_mm256_div_ps(__A, __B), |
| (__v8sf)__W); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B) { |
| return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, |
| (__v8sf)_mm256_div_ps(__A, __B), |
| (__v8sf)_mm256_setzero_ps()); |
| } |
| |
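| /* Expand: the inverse of compress; the low elements of __A are scattered to |
| * the element positions selected by __U, with unselected elements taken |
| * from __W (_mask) or zeroed (_maskz). */ |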
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_expand_pd (__m128d __W, __mmask8 __U, __m128d __A) { |
| return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A, |
| (__v2df) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_expand_pd (__mmask8 __U, __m128d __A) { |
| return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A, |
| (__v2df) |
| _mm_setzero_pd (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_expand_pd (__m256d __W, __mmask8 __U, __m256d __A) { |
| return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A, |
| (__v4df) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_expand_pd (__mmask8 __U, __m256d __A) { |
| return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A, |
| (__v4df) |
| _mm256_setzero_pd (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_expand_epi64 (__m128i __W, __mmask8 __U, __m128i __A) { |
| return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A, |
| (__v2di) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_expand_epi64 (__mmask8 __U, __m128i __A) { |
| return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A, |
| (__v2di) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_expand_epi64 (__m256i __W, __mmask8 __U, __m256i __A) { |
| return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A, |
| (__v4di) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_expand_epi64 (__mmask8 __U, __m256i __A) { |
| return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A, |
| (__v4di) |
| _mm256_setzero_si256 (), |
| (__mmask8) __U); |
| } |
| |
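| /* Expand-load: like expand, but the packed source elements are read from |
| * the unaligned address __P. */ |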
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_expandloadu_pd (__m128d __W, __mmask8 __U, void const *__P) { |
| return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P, |
| (__v2df) __W, |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_expandloadu_pd (__mmask8 __U, void const *__P) { |
| return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P, |
| (__v2df) |
| _mm_setzero_pd (), |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_expandloadu_pd (__m256d __W, __mmask8 __U, void const *__P) { |
| return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P, |
| (__v4df) __W, |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_expandloadu_pd (__mmask8 __U, void const *__P) { |
| return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P, |
| (__v4df) |
| _mm256_setzero_pd (), |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_expandloadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) { |
| return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P, |
| (__v2di) __W, |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) { |
| return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P, |
| (__v2di) |
| _mm_setzero_si128 (), |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_expandloadu_epi64 (__m256i __W, __mmask8 __U, |
| void const *__P) { |
| return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P, |
| (__v4di) __W, |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) { |
| return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P, |
| (__v4di) |
| _mm256_setzero_si256 (), |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_expandloadu_ps (__m128 __W, __mmask8 __U, void const *__P) { |
| return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P, |
| (__v4sf) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_expandloadu_ps (__mmask8 __U, void const *__P) { |
| return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P, |
| (__v4sf) |
| _mm_setzero_ps (), |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_expandloadu_ps (__m256 __W, __mmask8 __U, void const *__P) { |
| return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P, |
| (__v8sf) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_expandloadu_ps (__mmask8 __U, void const *__P) { |
| return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P, |
| (__v8sf) |
| _mm256_setzero_ps (), |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_expandloadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) { |
| return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P, |
| (__v4si) __W, |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) { |
| return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_expandloadu_epi32 (__m256i __W, __mmask8 __U, |
| void const *__P) { |
| return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P, |
| (__v8si) __W, |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) { |
| return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P, |
| (__v8si) |
| _mm256_setzero_si256 (), |
| (__mmask8) |
| __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_expand_ps (__m128 __W, __mmask8 __U, __m128 __A) { |
| return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A, |
| (__v4sf) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_expand_ps (__mmask8 __U, __m128 __A) { |
| return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A, |
| (__v4sf) |
| _mm_setzero_ps (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_expand_ps (__m256 __W, __mmask8 __U, __m256 __A) { |
| return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A, |
| (__v8sf) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_expand_ps (__mmask8 __U, __m256 __A) { |
| return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A, |
| (__v8sf) |
| _mm256_setzero_ps (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_expand_epi32 (__m128i __W, __mmask8 __U, __m128i __A) { |
| return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A, |
| (__v4si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_expand_epi32 (__mmask8 __U, __m128i __A) { |
| return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A, |
| (__v4si) |
| _mm_setzero_si128 (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_expand_epi32 (__m256i __W, __mmask8 __U, __m256i __A) { |
| return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A, |
| (__v8si) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_expand_epi32 (__mmask8 __U, __m256i __A) { |
| return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A, |
| (__v8si) |
| _mm256_setzero_si256 (), |
| (__mmask8) __U); |
| } |
| |
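| /* GetExp: extract the exponent of each element as a floating-point value, |
| * roughly floor(log2(|x|)); e.g. getexp(8.0) == 3.0 and getexp(0.5) == -1.0 |
| * (editor's note). */ |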
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_getexp_pd (__m128d __A) { |
| return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A, |
| (__v2df) |
| _mm_setzero_pd (), |
| (__mmask8) -1); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_getexp_pd (__m128d __W, __mmask8 __U, __m128d __A) { |
| return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A, |
| (__v2df) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_getexp_pd (__mmask8 __U, __m128d __A) { |
| return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A, |
| (__v2df) |
| _mm_setzero_pd (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_getexp_pd (__m256d __A) { |
| return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A, |
| (__v4df) |
| _mm256_setzero_pd (), |
| (__mmask8) -1); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_getexp_pd (__m256d __W, __mmask8 __U, __m256d __A) { |
| return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A, |
| (__v4df) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_getexp_pd (__mmask8 __U, __m256d __A) { |
| return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A, |
| (__v4df) |
| _mm256_setzero_pd (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_getexp_ps (__m128 __A) { |
| return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A, |
| (__v4sf) |
| _mm_setzero_ps (), |
| (__mmask8) -1); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_getexp_ps (__m128 __W, __mmask8 __U, __m128 __A) { |
| return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A, |
| (__v4sf) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_getexp_ps (__mmask8 __U, __m128 __A) { |
| return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A, |
| (__v4sf) |
| _mm_setzero_ps (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_getexp_ps (__m256 __A) { |
| return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A, |
| (__v8sf) |
| _mm256_setzero_ps (), |
| (__mmask8) -1); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_getexp_ps (__m256 __W, __mmask8 __U, __m256 __A) { |
| return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A, |
| (__v8sf) __W, |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_getexp_ps (__mmask8 __U, __m256 __A) { |
| return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A, |
| (__v8sf) |
| _mm256_setzero_ps (), |
| (__mmask8) __U); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_max_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { |
| return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, |
| (__v2df)_mm_max_pd(__A, __B), |
| (__v2df)__W); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_max_pd(__mmask8 __U, __m128d __A, __m128d __B) { |
| return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, |
| (__v2df)_mm_max_pd(__A, __B), |
| (__v2df)_mm_setzero_pd()); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_max_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { |
| return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, |
| (__v4df)_mm256_max_pd(__A, __B), |
| (__v4df)__W); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_max_pd(__mmask8 __U, __m256d __A, __m256d __B) { |
| return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, |
| (__v4df)_mm256_max_pd(__A, __B), |
| (__v4df)_mm256_setzero_pd()); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_max_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { |
| return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, |
| (__v4sf)_mm_max_ps(__A, __B), |
| (__v4sf)__W); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_max_ps(__mmask8 __U, __m128 __A, __m128 __B) { |
| return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, |
| (__v4sf)_mm_max_ps(__A, __B), |
| (__v4sf)_mm_setzero_ps()); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_max_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { |
| return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, |
| (__v8sf)_mm256_max_ps(__A, __B), |
| (__v8sf)__W); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_max_ps(__mmask8 __U, __m256 __A, __m256 __B) { |
| return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, |
| (__v8sf)_mm256_max_ps(__A, __B), |
| (__v8sf)_mm256_setzero_ps()); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_min_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { |
| return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, |
| (__v2df)_mm_min_pd(__A, __B), |
| (__v2df)__W); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_min_pd(__mmask8 __U, __m128d __A, __m128d __B) { |
| return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, |
| (__v2df)_mm_min_pd(__A, __B), |
| (__v2df)_mm_setzero_pd()); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_min_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { |
| return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, |
| (__v4df)_mm256_min_pd(__A, __B), |
| (__v4df)__W); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_min_pd(__mmask8 __U, __m256d __A, __m256d __B) { |
| return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, |
| (__v4df)_mm256_min_pd(__A, __B), |
| (__v4df)_mm256_setzero_pd()); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_min_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { |
| return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, |
| (__v4sf)_mm_min_ps(__A, __B), |
| (__v4sf)__W); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_min_ps(__mmask8 __U, __m128 __A, __m128 __B) { |
| return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, |
| (__v4sf)_mm_min_ps(__A, __B), |
| (__v4sf)_mm_setzero_ps()); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_min_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { |
| return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, |
| (__v8sf)_mm256_min_ps(__A, __B), |
| (__v8sf)__W); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B) { |
| return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, |
| (__v8sf)_mm256_min_ps(__A, __B), |
| (__v8sf)_mm256_setzero_ps()); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { |
| return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, |
| (__v2df)_mm_mul_pd(__A, __B), |
| (__v2df)__W); |
| } |
| |
| static __inline__ __m128d __DEFAULT_FN_ATTRS |
| _mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B) { |
| return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, |
| (__v2df)_mm_mul_pd(__A, __B), |
| (__v2df)_mm_setzero_pd()); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { |
| return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, |
| (__v4df)_mm256_mul_pd(__A, __B), |
| (__v4df)__W); |
| } |
| |
| static __inline__ __m256d __DEFAULT_FN_ATTRS |
| _mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B) { |
| return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, |
| (__v4df)_mm256_mul_pd(__A, __B), |
| (__v4df)_mm256_setzero_pd()); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { |
| return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, |
| (__v4sf)_mm_mul_ps(__A, __B), |
| (__v4sf)__W); |
| } |
| |
| static __inline__ __m128 __DEFAULT_FN_ATTRS |
| _mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B) { |
| return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, |
| (__v4sf)_mm_mul_ps(__A, __B), |
| (__v4sf)_mm_setzero_ps()); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { |
| return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, |
| (__v8sf)_mm256_mul_ps(__A, __B), |
| (__v8sf)__W); |
| } |
| |
| static __inline__ __m256 __DEFAULT_FN_ATTRS |
| _mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B) { |
| return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, |
| (__v8sf)_mm256_mul_ps(__A, __B), |
| (__v8sf)_mm256_setzero_ps()); |
| } |
| |
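| /* Masked integer absolute value. _mm_abs_epi64 below is new with AVX512VL; |
| * the SSSE3 pabs* instructions stop at 32-bit elements. */ |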
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A) { |
| return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, |
| (__v4si)_mm_abs_epi32(__A), |
| (__v4si)__W); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_maskz_abs_epi32(__mmask8 __U, __m128i __A) { |
| return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, |
| (__v4si)_mm_abs_epi32(__A), |
| (__v4si)_mm_setzero_si128()); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A) { |
| return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, |
| (__v8si)_mm256_abs_epi32(__A), |
| (__v8si)__W); |
| } |
| |
| static __inline__ __m256i __DEFAULT_FN_ATTRS |
| _mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) { |
| return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, |
| (__v8si)_mm256_abs_epi32(__A), |
| (__v8si)_mm256_setzero_si256()); |
| } |
| |
| static __inline__ __m128i __DEFAULT_FN_ATTRS |
| _mm_abs_epi64 (__m128i __A) { |
|