| /*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------=== |
| * |
| * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| * See https://llvm.org/LICENSE.txt for license information. |
| * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| * |
| *===-----------------------------------------------------------------------=== |
| */ |
| #ifndef __IMMINTRIN_H |
| #error "Never use <avx512fintrin.h> directly; include <immintrin.h> instead." |
| #endif |
| |
| #ifndef __AVX512FINTRIN_H |
| #define __AVX512FINTRIN_H |
| |
/* Lane-typed views of a 64-byte (512-bit) vector.  These internal
   typedefs name the same storage as the public __m512* types below but
   with explicit element types, so the intrinsics in this file can use
   element-wise operators and __builtin_shufflevector. */
typedef char __v64qi __attribute__((__vector_size__(64)));
typedef short __v32hi __attribute__((__vector_size__(64)));
typedef double __v8df __attribute__((__vector_size__(64)));
typedef float __v16sf __attribute__((__vector_size__(64)));
typedef long long __v8di __attribute__((__vector_size__(64)));
typedef int __v16si __attribute__((__vector_size__(64)));

/* Unsigned types */
typedef unsigned char __v64qu __attribute__((__vector_size__(64)));
typedef unsigned short __v32hu __attribute__((__vector_size__(64)));
typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
typedef unsigned int __v16su __attribute__((__vector_size__(64)));

/* Public 512-bit vector types, 64-byte aligned. */
typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64)));
typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64)));
typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64)));

/* 1-byte-aligned variants of the public types, for unaligned accesses. */
typedef float __m512_u __attribute__((__vector_size__(64), __aligned__(1)));
typedef double __m512d_u __attribute__((__vector_size__(64), __aligned__(1)));
typedef long long __m512i_u __attribute__((__vector_size__(64), __aligned__(1)));

/* Write-mask types: one bit per lane (8 bits for 64-bit lanes,
   16 bits for 32-bit lanes). */
typedef unsigned char __mmask8;
typedef unsigned short __mmask16;

/* Rounding mode macros. */
#define _MM_FROUND_TO_NEAREST_INT 0x00
#define _MM_FROUND_TO_NEG_INF 0x01
#define _MM_FROUND_TO_POS_INF 0x02
#define _MM_FROUND_TO_ZERO 0x03
#define _MM_FROUND_CUR_DIRECTION 0x04
| |
| /* Constants for integer comparison predicates */ |
/* Constants for integer comparison predicates.  Values 0..7 follow the
   hardware predicate encoding; encoding 3 has no portable name.  GE/GT
   are provided as macro aliases of their "not less" forms. */
typedef enum {
    _MM_CMPINT_EQ,      /* Equal */
    _MM_CMPINT_LT,      /* Less than */
    _MM_CMPINT_LE,      /* Less than or Equal */
    _MM_CMPINT_UNUSED,  /* Encoding 3 is not given a name here. */
    _MM_CMPINT_NE,      /* Not Equal */
    _MM_CMPINT_NLT,     /* Not Less than */
#define _MM_CMPINT_GE   _MM_CMPINT_NLT  /* Greater than or Equal */
    _MM_CMPINT_NLE      /* Not Less than or Equal */
#define _MM_CMPINT_GT   _MM_CMPINT_NLE  /* Greater than */
} _MM_CMPINT_ENUM;
| |
/* Four-lane permutation selectors.  Each letter names a source lane
   (A = lane 0 ... D = lane 3); the four letters, most significant
   first, are packed two bits apiece into the 8-bit immediate, e.g.
   _MM_PERM_DCBA == 0xE4 == (3<<6)|(2<<4)|(1<<2)|0 (the identity
   permutation).  Used by intrinsics taking an _MM_PERM_ENUM control. */
typedef enum
{
  _MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02,
  _MM_PERM_AAAD = 0x03, _MM_PERM_AABA = 0x04, _MM_PERM_AABB = 0x05,
  _MM_PERM_AABC = 0x06, _MM_PERM_AABD = 0x07, _MM_PERM_AACA = 0x08,
  _MM_PERM_AACB = 0x09, _MM_PERM_AACC = 0x0A, _MM_PERM_AACD = 0x0B,
  _MM_PERM_AADA = 0x0C, _MM_PERM_AADB = 0x0D, _MM_PERM_AADC = 0x0E,
  _MM_PERM_AADD = 0x0F, _MM_PERM_ABAA = 0x10, _MM_PERM_ABAB = 0x11,
  _MM_PERM_ABAC = 0x12, _MM_PERM_ABAD = 0x13, _MM_PERM_ABBA = 0x14,
  _MM_PERM_ABBB = 0x15, _MM_PERM_ABBC = 0x16, _MM_PERM_ABBD = 0x17,
  _MM_PERM_ABCA = 0x18, _MM_PERM_ABCB = 0x19, _MM_PERM_ABCC = 0x1A,
  _MM_PERM_ABCD = 0x1B, _MM_PERM_ABDA = 0x1C, _MM_PERM_ABDB = 0x1D,
  _MM_PERM_ABDC = 0x1E, _MM_PERM_ABDD = 0x1F, _MM_PERM_ACAA = 0x20,
  _MM_PERM_ACAB = 0x21, _MM_PERM_ACAC = 0x22, _MM_PERM_ACAD = 0x23,
  _MM_PERM_ACBA = 0x24, _MM_PERM_ACBB = 0x25, _MM_PERM_ACBC = 0x26,
  _MM_PERM_ACBD = 0x27, _MM_PERM_ACCA = 0x28, _MM_PERM_ACCB = 0x29,
  _MM_PERM_ACCC = 0x2A, _MM_PERM_ACCD = 0x2B, _MM_PERM_ACDA = 0x2C,
  _MM_PERM_ACDB = 0x2D, _MM_PERM_ACDC = 0x2E, _MM_PERM_ACDD = 0x2F,
  _MM_PERM_ADAA = 0x30, _MM_PERM_ADAB = 0x31, _MM_PERM_ADAC = 0x32,
  _MM_PERM_ADAD = 0x33, _MM_PERM_ADBA = 0x34, _MM_PERM_ADBB = 0x35,
  _MM_PERM_ADBC = 0x36, _MM_PERM_ADBD = 0x37, _MM_PERM_ADCA = 0x38,
  _MM_PERM_ADCB = 0x39, _MM_PERM_ADCC = 0x3A, _MM_PERM_ADCD = 0x3B,
  _MM_PERM_ADDA = 0x3C, _MM_PERM_ADDB = 0x3D, _MM_PERM_ADDC = 0x3E,
  _MM_PERM_ADDD = 0x3F, _MM_PERM_BAAA = 0x40, _MM_PERM_BAAB = 0x41,
  _MM_PERM_BAAC = 0x42, _MM_PERM_BAAD = 0x43, _MM_PERM_BABA = 0x44,
  _MM_PERM_BABB = 0x45, _MM_PERM_BABC = 0x46, _MM_PERM_BABD = 0x47,
  _MM_PERM_BACA = 0x48, _MM_PERM_BACB = 0x49, _MM_PERM_BACC = 0x4A,
  _MM_PERM_BACD = 0x4B, _MM_PERM_BADA = 0x4C, _MM_PERM_BADB = 0x4D,
  _MM_PERM_BADC = 0x4E, _MM_PERM_BADD = 0x4F, _MM_PERM_BBAA = 0x50,
  _MM_PERM_BBAB = 0x51, _MM_PERM_BBAC = 0x52, _MM_PERM_BBAD = 0x53,
  _MM_PERM_BBBA = 0x54, _MM_PERM_BBBB = 0x55, _MM_PERM_BBBC = 0x56,
  _MM_PERM_BBBD = 0x57, _MM_PERM_BBCA = 0x58, _MM_PERM_BBCB = 0x59,
  _MM_PERM_BBCC = 0x5A, _MM_PERM_BBCD = 0x5B, _MM_PERM_BBDA = 0x5C,
  _MM_PERM_BBDB = 0x5D, _MM_PERM_BBDC = 0x5E, _MM_PERM_BBDD = 0x5F,
  _MM_PERM_BCAA = 0x60, _MM_PERM_BCAB = 0x61, _MM_PERM_BCAC = 0x62,
  _MM_PERM_BCAD = 0x63, _MM_PERM_BCBA = 0x64, _MM_PERM_BCBB = 0x65,
  _MM_PERM_BCBC = 0x66, _MM_PERM_BCBD = 0x67, _MM_PERM_BCCA = 0x68,
  _MM_PERM_BCCB = 0x69, _MM_PERM_BCCC = 0x6A, _MM_PERM_BCCD = 0x6B,
  _MM_PERM_BCDA = 0x6C, _MM_PERM_BCDB = 0x6D, _MM_PERM_BCDC = 0x6E,
  _MM_PERM_BCDD = 0x6F, _MM_PERM_BDAA = 0x70, _MM_PERM_BDAB = 0x71,
  _MM_PERM_BDAC = 0x72, _MM_PERM_BDAD = 0x73, _MM_PERM_BDBA = 0x74,
  _MM_PERM_BDBB = 0x75, _MM_PERM_BDBC = 0x76, _MM_PERM_BDBD = 0x77,
  _MM_PERM_BDCA = 0x78, _MM_PERM_BDCB = 0x79, _MM_PERM_BDCC = 0x7A,
  _MM_PERM_BDCD = 0x7B, _MM_PERM_BDDA = 0x7C, _MM_PERM_BDDB = 0x7D,
  _MM_PERM_BDDC = 0x7E, _MM_PERM_BDDD = 0x7F, _MM_PERM_CAAA = 0x80,
  _MM_PERM_CAAB = 0x81, _MM_PERM_CAAC = 0x82, _MM_PERM_CAAD = 0x83,
  _MM_PERM_CABA = 0x84, _MM_PERM_CABB = 0x85, _MM_PERM_CABC = 0x86,
  _MM_PERM_CABD = 0x87, _MM_PERM_CACA = 0x88, _MM_PERM_CACB = 0x89,
  _MM_PERM_CACC = 0x8A, _MM_PERM_CACD = 0x8B, _MM_PERM_CADA = 0x8C,
  _MM_PERM_CADB = 0x8D, _MM_PERM_CADC = 0x8E, _MM_PERM_CADD = 0x8F,
  _MM_PERM_CBAA = 0x90, _MM_PERM_CBAB = 0x91, _MM_PERM_CBAC = 0x92,
  _MM_PERM_CBAD = 0x93, _MM_PERM_CBBA = 0x94, _MM_PERM_CBBB = 0x95,
  _MM_PERM_CBBC = 0x96, _MM_PERM_CBBD = 0x97, _MM_PERM_CBCA = 0x98,
  _MM_PERM_CBCB = 0x99, _MM_PERM_CBCC = 0x9A, _MM_PERM_CBCD = 0x9B,
  _MM_PERM_CBDA = 0x9C, _MM_PERM_CBDB = 0x9D, _MM_PERM_CBDC = 0x9E,
  _MM_PERM_CBDD = 0x9F, _MM_PERM_CCAA = 0xA0, _MM_PERM_CCAB = 0xA1,
  _MM_PERM_CCAC = 0xA2, _MM_PERM_CCAD = 0xA3, _MM_PERM_CCBA = 0xA4,
  _MM_PERM_CCBB = 0xA5, _MM_PERM_CCBC = 0xA6, _MM_PERM_CCBD = 0xA7,
  _MM_PERM_CCCA = 0xA8, _MM_PERM_CCCB = 0xA9, _MM_PERM_CCCC = 0xAA,
  _MM_PERM_CCCD = 0xAB, _MM_PERM_CCDA = 0xAC, _MM_PERM_CCDB = 0xAD,
  _MM_PERM_CCDC = 0xAE, _MM_PERM_CCDD = 0xAF, _MM_PERM_CDAA = 0xB0,
  _MM_PERM_CDAB = 0xB1, _MM_PERM_CDAC = 0xB2, _MM_PERM_CDAD = 0xB3,
  _MM_PERM_CDBA = 0xB4, _MM_PERM_CDBB = 0xB5, _MM_PERM_CDBC = 0xB6,
  _MM_PERM_CDBD = 0xB7, _MM_PERM_CDCA = 0xB8, _MM_PERM_CDCB = 0xB9,
  _MM_PERM_CDCC = 0xBA, _MM_PERM_CDCD = 0xBB, _MM_PERM_CDDA = 0xBC,
  _MM_PERM_CDDB = 0xBD, _MM_PERM_CDDC = 0xBE, _MM_PERM_CDDD = 0xBF,
  _MM_PERM_DAAA = 0xC0, _MM_PERM_DAAB = 0xC1, _MM_PERM_DAAC = 0xC2,
  _MM_PERM_DAAD = 0xC3, _MM_PERM_DABA = 0xC4, _MM_PERM_DABB = 0xC5,
  _MM_PERM_DABC = 0xC6, _MM_PERM_DABD = 0xC7, _MM_PERM_DACA = 0xC8,
  _MM_PERM_DACB = 0xC9, _MM_PERM_DACC = 0xCA, _MM_PERM_DACD = 0xCB,
  _MM_PERM_DADA = 0xCC, _MM_PERM_DADB = 0xCD, _MM_PERM_DADC = 0xCE,
  _MM_PERM_DADD = 0xCF, _MM_PERM_DBAA = 0xD0, _MM_PERM_DBAB = 0xD1,
  _MM_PERM_DBAC = 0xD2, _MM_PERM_DBAD = 0xD3, _MM_PERM_DBBA = 0xD4,
  _MM_PERM_DBBB = 0xD5, _MM_PERM_DBBC = 0xD6, _MM_PERM_DBBD = 0xD7,
  _MM_PERM_DBCA = 0xD8, _MM_PERM_DBCB = 0xD9, _MM_PERM_DBCC = 0xDA,
  _MM_PERM_DBCD = 0xDB, _MM_PERM_DBDA = 0xDC, _MM_PERM_DBDB = 0xDD,
  _MM_PERM_DBDC = 0xDE, _MM_PERM_DBDD = 0xDF, _MM_PERM_DCAA = 0xE0,
  _MM_PERM_DCAB = 0xE1, _MM_PERM_DCAC = 0xE2, _MM_PERM_DCAD = 0xE3,
  _MM_PERM_DCBA = 0xE4, _MM_PERM_DCBB = 0xE5, _MM_PERM_DCBC = 0xE6,
  _MM_PERM_DCBD = 0xE7, _MM_PERM_DCCA = 0xE8, _MM_PERM_DCCB = 0xE9,
  _MM_PERM_DCCC = 0xEA, _MM_PERM_DCCD = 0xEB, _MM_PERM_DCDA = 0xEC,
  _MM_PERM_DCDB = 0xED, _MM_PERM_DCDC = 0xEE, _MM_PERM_DCDD = 0xEF,
  _MM_PERM_DDAA = 0xF0, _MM_PERM_DDAB = 0xF1, _MM_PERM_DDAC = 0xF2,
  _MM_PERM_DDAD = 0xF3, _MM_PERM_DDBA = 0xF4, _MM_PERM_DDBB = 0xF5,
  _MM_PERM_DDBC = 0xF6, _MM_PERM_DDBD = 0xF7, _MM_PERM_DDCA = 0xF8,
  _MM_PERM_DDCB = 0xF9, _MM_PERM_DDCC = 0xFA, _MM_PERM_DDCD = 0xFB,
  _MM_PERM_DDDA = 0xFC, _MM_PERM_DDDB = 0xFD, _MM_PERM_DDDC = 0xFE,
  _MM_PERM_DDDD = 0xFF
} _MM_PERM_ENUM;
| |
/* Normalization-interval selector for the getmant (extract mantissa)
   intrinsics: chooses the interval the result is normalized into. */
typedef enum
{
  _MM_MANT_NORM_1_2,      /* interval [1, 2)      */
  _MM_MANT_NORM_p5_2,     /* interval [0.5, 2)    */
  _MM_MANT_NORM_p5_1,     /* interval [0.5, 1)    */
  _MM_MANT_NORM_p75_1p5   /* interval [0.75, 1.5) */
} _MM_MANTISSA_NORM_ENUM;

/* Sign-control selector for the getmant intrinsics. */
typedef enum
{
  _MM_MANT_SIGN_src,      /* sign = sign(SRC)             */
  _MM_MANT_SIGN_zero,     /* sign = 0                     */
  _MM_MANT_SIGN_nan       /* DEST = NaN if sign(SRC) = 1  */
} _MM_MANTISSA_SIGN_ENUM;
| |
/* Define the default attributes for the functions in this file.
   Every intrinsic is force-inlined, hidden from the debugger, and
   requires the avx512f target feature; the 512/128 variants also
   declare the minimum vector width they need. */
#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(512)))
#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))

/* Create vectors with repeated elements */

/* Return a 512-bit integer vector with all bits cleared. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_setzero_si512(void)
{
  return __extension__ (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
}

/* Historical alias kept for source compatibility. */
#define _mm512_setzero_epi32 _mm512_setzero_si512
| |
/* Return a 512-bit double vector with unspecified contents.  No code is
   generated; the value must not be relied upon. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_undefined_pd(void)
{
  return (__m512d)__builtin_ia32_undef512();
}

/* Return a 512-bit float vector with unspecified contents. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_undefined(void)
{
  return (__m512)__builtin_ia32_undef512();
}

/* Same as _mm512_undefined(), under the conventional _ps name. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_undefined_ps(void)
{
  return (__m512)__builtin_ia32_undef512();
}

/* Return a 512-bit integer vector with unspecified contents. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_undefined_epi32(void)
{
  return (__m512i)__builtin_ia32_undef512();
}
| |
/* Broadcast the low 32-bit element of __A into all 16 lanes (every
   shuffle index selects lane 0 of the source). */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_broadcastd_epi32 (__m128i __A)
{
  return (__m512i)__builtin_shufflevector((__v4si) __A, (__v4si) __A,
                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}

/* Masked broadcast: lanes whose bit in __M is set receive the broadcast
   value; lanes with a clear bit keep the corresponding lane of __O. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_broadcastd_epi32 (__m512i __O, __mmask16 __M, __m128i __A)
{
  return (__m512i)__builtin_ia32_selectd_512(__M,
                                             (__v16si) _mm512_broadcastd_epi32(__A),
                                             (__v16si) __O);
}

/* Zero-masked broadcast: lanes with a clear mask bit are zeroed. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcastd_epi32 (__mmask16 __M, __m128i __A)
{
  return (__m512i)__builtin_ia32_selectd_512(__M,
                                             (__v16si) _mm512_broadcastd_epi32(__A),
                                             (__v16si) _mm512_setzero_si512());
}

/* Broadcast the low 64-bit element of __A into all 8 lanes. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_broadcastq_epi64 (__m128i __A)
{
  return (__m512i)__builtin_shufflevector((__v2di) __A, (__v2di) __A,
                                          0, 0, 0, 0, 0, 0, 0, 0);
}

/* Masked 64-bit broadcast; unselected lanes come from __O. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_broadcastq_epi64 (__m512i __O, __mmask8 __M, __m128i __A)
{
  return (__m512i)__builtin_ia32_selectq_512(__M,
                                             (__v8di) _mm512_broadcastq_epi64(__A),
                                             (__v8di) __O);

}

/* Zero-masked 64-bit broadcast; unselected lanes are zeroed. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
{
  return (__m512i)__builtin_ia32_selectq_512(__M,
                                             (__v8di) _mm512_broadcastq_epi64(__A),
                                             (__v8di) _mm512_setzero_si512());
}
| |
| |
/* Return a 512-bit float vector with all 16 lanes set to +0.0. */
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_setzero_ps(void)
{
  return __extension__ (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
}

/* Historical alias kept for source compatibility. */
#define _mm512_setzero _mm512_setzero_ps

/* Return a 512-bit double vector with all 8 lanes set to +0.0. */
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_setzero_pd(void)
{
  return __extension__ (__m512d){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
}

/* Replicate the scalar __w into all 16 float lanes. */
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_set1_ps(float __w)
{
  return __extension__ (__m512){ __w, __w, __w, __w, __w, __w, __w, __w,
                                 __w, __w, __w, __w, __w, __w, __w, __w };
}

/* Replicate the scalar __w into all 8 double lanes. */
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_set1_pd(double __w)
{
  return __extension__ (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w };
}

/* Replicate the byte __w into all 64 8-bit lanes. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_set1_epi8(char __w)
{
  return __extension__ (__m512i)(__v64qi){
    __w, __w, __w, __w, __w, __w, __w, __w,
    __w, __w, __w, __w, __w, __w, __w, __w,
    __w, __w, __w, __w, __w, __w, __w, __w,
    __w, __w, __w, __w, __w, __w, __w, __w,
    __w, __w, __w, __w, __w, __w, __w, __w,
    __w, __w, __w, __w, __w, __w, __w, __w,
    __w, __w, __w, __w, __w, __w, __w, __w,
    __w, __w, __w, __w, __w, __w, __w, __w };
}

/* Replicate __w into all 32 16-bit lanes. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_set1_epi16(short __w)
{
  return __extension__ (__m512i)(__v32hi){
    __w, __w, __w, __w, __w, __w, __w, __w,
    __w, __w, __w, __w, __w, __w, __w, __w,
    __w, __w, __w, __w, __w, __w, __w, __w,
    __w, __w, __w, __w, __w, __w, __w, __w };
}

/* Replicate __s into all 16 32-bit lanes. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_set1_epi32(int __s)
{
  return __extension__ (__m512i)(__v16si){
    __s, __s, __s, __s, __s, __s, __s, __s,
    __s, __s, __s, __s, __s, __s, __s, __s };
}

/* Zero-masked 32-bit splat: lanes with a clear bit in __M are zeroed. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
{
  return (__m512i)__builtin_ia32_selectd_512(__M,
                                             (__v16si)_mm512_set1_epi32(__A),
                                             (__v16si)_mm512_setzero_si512());
}

/* Replicate __d into all 8 64-bit lanes. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_set1_epi64(long long __d)
{
  return __extension__(__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d };
}

/* Zero-masked 64-bit splat: lanes with a clear bit in __M are zeroed. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_set1_epi64(__mmask8 __M, long long __A)
{
  return (__m512i)__builtin_ia32_selectq_512(__M,
                                             (__v8di)_mm512_set1_epi64(__A),
                                             (__v8di)_mm512_setzero_si512());
}
| |
/* Broadcast the low float of __A into all 16 lanes. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_broadcastss_ps(__m128 __A)
{
  return (__m512)__builtin_shufflevector((__v4sf) __A, (__v4sf) __A,
                                         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}

/* Build a vector by repeating the 4-element pattern (__D,__C,__B,__A)
   from the lowest lane upward: the last argument lands in lane 0,
   matching the _mm512_set* high-to-low argument convention. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_set4_epi32 (int __A, int __B, int __C, int __D)
{
  return __extension__ (__m512i)(__v16si)
         { __D, __C, __B, __A, __D, __C, __B, __A,
           __D, __C, __B, __A, __D, __C, __B, __A };
}

/* 64-bit lane variant of _mm512_set4_epi32. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_set4_epi64 (long long __A, long long __B, long long __C,
                   long long __D)
{
  return __extension__ (__m512i) (__v8di)
         { __D, __C, __B, __A, __D, __C, __B, __A };
}

/* Double variant: repeat (__D,__C,__B,__A) twice. */
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_set4_pd (double __A, double __B, double __C, double __D)
{
  return __extension__ (__m512d)
         { __D, __C, __B, __A, __D, __C, __B, __A };
}

/* Float variant: repeat (__D,__C,__B,__A) four times. */
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_set4_ps (float __A, float __B, float __C, float __D)
{
  return __extension__ (__m512)
         { __D, __C, __B, __A, __D, __C, __B, __A,
           __D, __C, __B, __A, __D, __C, __B, __A };
}

/* "setr" (reversed) forms: arguments given low-lane first; implemented
   by swapping the arguments into the set4 order. */
#define _mm512_setr4_epi32(e0,e1,e2,e3) \
  _mm512_set4_epi32((e3),(e2),(e1),(e0))

#define _mm512_setr4_epi64(e0,e1,e2,e3) \
  _mm512_set4_epi64((e3),(e2),(e1),(e0))

#define _mm512_setr4_pd(e0,e1,e2,e3) \
  _mm512_set4_pd((e3),(e2),(e1),(e0))

#define _mm512_setr4_ps(e0,e1,e2,e3) \
  _mm512_set4_ps((e3),(e2),(e1),(e0))

/* Broadcast the low double of __A into all 8 lanes. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_broadcastsd_pd(__m128d __A)
{
  return (__m512d)__builtin_shufflevector((__v2df) __A, (__v2df) __A,
                                          0, 0, 0, 0, 0, 0, 0, 0);
}
| |
| /* Cast between vector types */ |
| |
/* Widen a 256-bit double vector to 512 bits.  Shuffle index -1 leaves
   the corresponding upper lane undefined (no zeroing is implied; use
   the _mm512_zext* intrinsics for guaranteed zero upper lanes). */
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_castpd256_pd512(__m256d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
}

/* Widen a 256-bit float vector to 512 bits; upper 8 lanes undefined. */
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_castps256_ps512(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7,
                                 -1, -1, -1, -1, -1, -1, -1, -1);
}

/* Extract the low 128 bits (lanes 0-1) of a 512-bit double vector. */
static __inline __m128d __DEFAULT_FN_ATTRS512
_mm512_castpd512_pd128(__m512d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1);
}

/* Extract the low 256 bits (lanes 0-3) of a 512-bit double vector. */
static __inline __m256d __DEFAULT_FN_ATTRS512
_mm512_castpd512_pd256 (__m512d __A)
{
  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3);
}

/* Extract the low 128 bits (lanes 0-3) of a 512-bit float vector. */
static __inline __m128 __DEFAULT_FN_ATTRS512
_mm512_castps512_ps128(__m512 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
}

/* Extract the low 256 bits (lanes 0-7) of a 512-bit float vector. */
static __inline __m256 __DEFAULT_FN_ATTRS512
_mm512_castps512_ps256 (__m512 __A)
{
  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, 4, 5, 6, 7);
}
| |
| static __inline __m512 __DEFAULT_FN_ATTRS512 |
| _mm512_castpd_ps (__m512d __A) |
| { |
| return (__m512) (__A); |
| } |
| |
| static __inline __m512i __DEFAULT_FN_ATTRS512 |
| _mm512_castpd_si512 (__m512d __A) |
| { |
| return (__m512i) (__A); |
| } |
| |
/* Widen a 128-bit double vector to 512 bits; index -1 leaves the six
   upper lanes undefined (see _mm512_zextpd128_pd512 for zeroing). */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_castpd128_pd512 (__m128d __A)
{
  return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
}
| |
| static __inline __m512d __DEFAULT_FN_ATTRS512 |
| _mm512_castps_pd (__m512 __A) |
| { |
| return (__m512d) (__A); |
| } |
| |
| static __inline __m512i __DEFAULT_FN_ATTRS512 |
| _mm512_castps_si512 (__m512 __A) |
| { |
| return (__m512i) (__A); |
| } |
| |
/* Widen a 128-bit float vector to 512 bits; upper 12 lanes undefined. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_castps128_ps512 (__m128 __A)
{
  return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
}

/* Widen a 128-bit integer vector to 512 bits; upper lanes undefined. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_castsi128_si512 (__m128i __A)
{
  return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
}

/* Widen a 256-bit integer vector to 512 bits; upper lanes undefined. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_castsi256_si512 (__m256i __A)
{
  return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1);
}

/* Reinterpret a 512-bit integer vector as [16 x float]; bit cast only. */
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_castsi512_ps (__m512i __A)
{
  return (__m512) (__A);
}

/* Reinterpret a 512-bit integer vector as [8 x double]; bit cast only. */
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_castsi512_pd (__m512i __A)
{
  return (__m512d) (__A);
}

/* Extract the low 128 bits of a 512-bit integer vector. */
static __inline __m128i __DEFAULT_FN_ATTRS512
_mm512_castsi512_si128 (__m512i __A)
{
  return (__m128i)__builtin_shufflevector(__A, __A , 0, 1);
}

/* Extract the low 256 bits of a 512-bit integer vector. */
static __inline __m256i __DEFAULT_FN_ATTRS512
_mm512_castsi512_si256 (__m512i __A)
{
  return (__m256i)__builtin_shufflevector(__A, __A , 0, 1, 2, 3);
}
| |
| static __inline__ __mmask16 __DEFAULT_FN_ATTRS |
| _mm512_int2mask(int __a) |
| { |
| return (__mmask16)__a; |
| } |
| |
| static __inline__ int __DEFAULT_FN_ATTRS |
| _mm512_mask2int(__mmask16 __a) |
| { |
| return (int)__a; |
| } |
| |
/// Constructs a 512-bit floating-point vector of [8 x double] from a
/// 128-bit floating-point vector of [2 x double]. The lower 128 bits
/// contain the value of the source vector. The upper 384 bits are set
/// to zero.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic has no corresponding instruction.
///
/// \param __a
///    A 128-bit vector of [2 x double].
/// \returns A 512-bit floating-point vector of [8 x double]. The lower 128 bits
///    contain the value of the parameter. The upper 384 bits are set to zero.
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_zextpd128_pd512(__m128d __a)
{
  /* Indices >= 2 select lanes of the second (all-zero) operand. */
  return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3, 2, 3, 2, 3);
}

/// Constructs a 512-bit floating-point vector of [8 x double] from a
/// 256-bit floating-point vector of [4 x double]. The lower 256 bits
/// contain the value of the source vector. The upper 256 bits are set
/// to zero.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic has no corresponding instruction.
///
/// \param __a
///    A 256-bit vector of [4 x double].
/// \returns A 512-bit floating-point vector of [8 x double]. The lower 256 bits
///    contain the value of the parameter. The upper 256 bits are set to zero.
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_zextpd256_pd512(__m256d __a)
{
  /* Indices 4-7 select lanes of the zero operand. */
  return __builtin_shufflevector((__v4df)__a, (__v4df)_mm256_setzero_pd(), 0, 1, 2, 3, 4, 5, 6, 7);
}

/// Constructs a 512-bit floating-point vector of [16 x float] from a
/// 128-bit floating-point vector of [4 x float]. The lower 128 bits contain
/// the value of the source vector. The upper 384 bits are set to zero.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic has no corresponding instruction.
///
/// \param __a
///    A 128-bit vector of [4 x float].
/// \returns A 512-bit floating-point vector of [16 x float]. The lower 128 bits
///    contain the value of the parameter. The upper 384 bits are set to zero.
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_zextps128_ps512(__m128 __a)
{
  /* Indices >= 4 select lanes of the zero operand. */
  return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7);
}

/// Constructs a 512-bit floating-point vector of [16 x float] from a
/// 256-bit floating-point vector of [8 x float]. The lower 256 bits contain
/// the value of the source vector. The upper 256 bits are set to zero.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic has no corresponding instruction.
///
/// \param __a
///    A 256-bit vector of [8 x float].
/// \returns A 512-bit floating-point vector of [16 x float]. The lower 256 bits
///    contain the value of the parameter. The upper 256 bits are set to zero.
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_zextps256_ps512(__m256 __a)
{
  /* Indices 8-15 select lanes of the zero operand. */
  return __builtin_shufflevector((__v8sf)__a, (__v8sf)_mm256_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}

/// Constructs a 512-bit integer vector from a 128-bit integer vector.
///    The lower 128 bits contain the value of the source vector. The upper
///    384 bits are set to zero.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic has no corresponding instruction.
///
/// \param __a
///    A 128-bit integer vector.
/// \returns A 512-bit integer vector. The lower 128 bits contain the value of
///    the parameter. The upper 384 bits are set to zero.
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_zextsi128_si512(__m128i __a)
{
  /* Indices >= 2 select lanes of the zero operand. */
  return __builtin_shufflevector((__v2di)__a, (__v2di)_mm_setzero_si128(), 0, 1, 2, 3, 2, 3, 2, 3);
}

/// Constructs a 512-bit integer vector from a 256-bit integer vector.
///    The lower 256 bits contain the value of the source vector. The upper
///    256 bits are set to zero.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic has no corresponding instruction.
///
/// \param __a
///    A 256-bit integer vector.
/// \returns A 512-bit integer vector. The lower 256 bits contain the value of
///    the parameter. The upper 256 bits are set to zero.
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_zextsi256_si512(__m256i __a)
{
  /* Indices 4-7 select lanes of the zero operand. */
  return __builtin_shufflevector((__v4di)__a, (__v4di)_mm256_setzero_si256(), 0, 1, 2, 3, 4, 5, 6, 7);
}
| |
/* Bitwise operators */

/* Bitwise AND, viewed as 16 32-bit lanes (lane width only matters for
   the masked forms below). */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_and_epi32(__m512i __a, __m512i __b)
{
  return (__m512i)((__v16su)__a & (__v16su)__b);
}

/* Masked AND: lanes whose bit in __k is set take __a & __b; the rest
   keep the corresponding lane of __src. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
                                             (__v16si) _mm512_and_epi32(__a, __b),
                                             (__v16si) __src);
}

/* Zero-masked AND: unselected lanes are zeroed. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b)
{
  return (__m512i) _mm512_mask_and_epi32(_mm512_setzero_si512 (),
                                         __k, __a, __b);
}

/* Bitwise AND, viewed as 8 64-bit lanes. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_and_epi64(__m512i __a, __m512i __b)
{
  return (__m512i)((__v8du)__a & (__v8du)__b);
}

/* Masked 64-bit AND; unselected lanes come from __src. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
{
  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __k,
                                               (__v8di) _mm512_and_epi64(__a, __b),
                                               (__v8di) __src);
}

/* Zero-masked 64-bit AND. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b)
{
  return (__m512i) _mm512_mask_and_epi64(_mm512_setzero_si512 (),
                                         __k, __a, __b);
}

/* AND-NOT: (~__A) & __B — note the complement applies to the FIRST
   operand, matching the vpandn instruction convention. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_andnot_si512 (__m512i __A, __m512i __B)
{
  return (__m512i)(~(__v8du)__A & (__v8du)__B);
}

/* (~__A) & __B over 16 32-bit lanes. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_andnot_epi32 (__m512i __A, __m512i __B)
{
  return (__m512i)(~(__v16su)__A & (__v16su)__B);
}

/* Masked AND-NOT; unselected lanes come from __W. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_andnot_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                             (__v16si)_mm512_andnot_epi32(__A, __B),
                                             (__v16si)__W);
}

/* Zero-masked AND-NOT over 32-bit lanes. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B)
{
  return (__m512i)_mm512_mask_andnot_epi32(_mm512_setzero_si512(),
                                           __U, __A, __B);
}

/* (~__A) & __B over 8 64-bit lanes. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_andnot_epi64(__m512i __A, __m512i __B)
{
  return (__m512i)(~(__v8du)__A & (__v8du)__B);
}

/* Masked 64-bit AND-NOT; unselected lanes come from __W. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_andnot_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                             (__v8di)_mm512_andnot_epi64(__A, __B),
                                             (__v8di)__W);
}

/* Zero-masked 64-bit AND-NOT. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A, __m512i __B)
{
  return (__m512i)_mm512_mask_andnot_epi64(_mm512_setzero_si512(),
                                           __U, __A, __B);
}
| |
/* Bitwise OR, viewed as 16 32-bit lanes. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_or_epi32(__m512i __a, __m512i __b)
{
  return (__m512i)((__v16su)__a | (__v16su)__b);
}

/* Masked OR: selected lanes take __a | __b, the rest come from __src. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
                                             (__v16si)_mm512_or_epi32(__a, __b),
                                             (__v16si)__src);
}

/* Zero-masked OR over 32-bit lanes. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b)
{
  return (__m512i)_mm512_mask_or_epi32(_mm512_setzero_si512(), __k, __a, __b);
}

/* Bitwise OR, viewed as 8 64-bit lanes. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_or_epi64(__m512i __a, __m512i __b)
{
  return (__m512i)((__v8du)__a | (__v8du)__b);
}

/* Masked 64-bit OR; unselected lanes come from __src. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
                                             (__v8di)_mm512_or_epi64(__a, __b),
                                             (__v8di)__src);
}

/* Zero-masked 64-bit OR. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b)
{
  return (__m512i)_mm512_mask_or_epi64(_mm512_setzero_si512(), __k, __a, __b);
}

/* Bitwise XOR, viewed as 16 32-bit lanes. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_xor_epi32(__m512i __a, __m512i __b)
{
  return (__m512i)((__v16su)__a ^ (__v16su)__b);
}

/* Masked XOR; unselected lanes come from __src. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
                                             (__v16si)_mm512_xor_epi32(__a, __b),
                                             (__v16si)__src);
}

/* Zero-masked XOR over 32-bit lanes. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b)
{
  return (__m512i)_mm512_mask_xor_epi32(_mm512_setzero_si512(), __k, __a, __b);
}

/* Bitwise XOR, viewed as 8 64-bit lanes. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_xor_epi64(__m512i __a, __m512i __b)
{
  return (__m512i)((__v8du)__a ^ (__v8du)__b);
}

/* Masked 64-bit XOR; unselected lanes come from __src. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
                                             (__v8di)_mm512_xor_epi64(__a, __b),
                                             (__v8di)__src);
}

/* Zero-masked 64-bit XOR. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b)
{
  return (__m512i)_mm512_mask_xor_epi64(_mm512_setzero_si512(), __k, __a, __b);
}

/* Whole-register bitwise AND (lane width is irrelevant without a mask). */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_and_si512(__m512i __a, __m512i __b)
{
  return (__m512i)((__v8du)__a & (__v8du)__b);
}

/* Whole-register bitwise OR. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_or_si512(__m512i __a, __m512i __b)
{
  return (__m512i)((__v8du)__a | (__v8du)__b);
}

/* Whole-register bitwise XOR. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_xor_si512(__m512i __a, __m512i __b)
{
  return (__m512i)((__v8du)__a ^ (__v8du)__b);
}
| |
/* Arithmetic */

/* Lane-wise double addition (default rounding mode). */
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_add_pd(__m512d __a, __m512d __b)
{
  return (__m512d)((__v8df)__a + (__v8df)__b);
}

/* Lane-wise float addition. */
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_add_ps(__m512 __a, __m512 __b)
{
  return (__m512)((__v16sf)__a + (__v16sf)__b);
}

/* Lane-wise double multiplication. */
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_mul_pd(__m512d __a, __m512d __b)
{
  return (__m512d)((__v8df)__a * (__v8df)__b);
}

/* Lane-wise float multiplication. */
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_mul_ps(__m512 __a, __m512 __b)
{
  return (__m512)((__v16sf)__a * (__v16sf)__b);
}

/* Lane-wise double subtraction (__a - __b). */
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_sub_pd(__m512d __a, __m512d __b)
{
  return (__m512d)((__v8df)__a - (__v8df)__b);
}

/* Lane-wise float subtraction (__a - __b). */
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_sub_ps(__m512 __a, __m512 __b)
{
  return (__m512)((__v16sf)__a - (__v16sf)__b);
}

/* 64-bit lane addition; unsigned lane type makes wraparound
   well-defined instead of signed-overflow UB. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_add_epi64 (__m512i __A, __m512i __B)
{
  return (__m512i) ((__v8du) __A + (__v8du) __B);
}
| |
| static __inline__ __m512i __DEFAULT_FN_ATTRS512 |
| _mm512_mask_add_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) |
| { |
| return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, |
| (__v8di)_mm512_add_epi64(__A, __B), |
| (__v8di)__W); |
| } |
| |
| static __inline__ __m512i __DEFAULT_FN_ATTRS512 |
| _mm512_maskz_add_epi64(__mmask8 __U, __m512i __A, __m512i __B) |
| { |
| return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, |
| (__v8di)_mm512_add_epi64(__A, __B), |
| (__v8di)_mm512_setzero_si512()); |
| } |
| |
| static __inline__ __m512i __DEFAULT_FN_ATTRS512 |
| _mm512_sub_epi64 (__m512i __A, __m512i __B) |
| { |
| return (__m512i) ((__v8du) __A - (__v8du) __B); |
| } |
| |
| static __inline__ __m512i __DEFAULT_FN_ATTRS512 |
| _mm512_mask_sub_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) |
| { |
| return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, |
| (__v8di)_mm512_sub_epi64(__A, __B), |
| (__v8di)__W); |
| } |
| |
| static __inline__ __m512i __DEFAULT_FN_ATTRS512 |
| _mm512_maskz_sub_epi64(__mmask8 __U, __m512i __A, __m512i __B) |
| { |
| return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, |
| (__v8di)_mm512_sub_epi64(__A, __B), |
| (__v8di)_mm512_setzero_si512()); |
| } |
| |
/* Lanewise 32-bit integer add over 16 lanes; unsigned casts give defined
 * wraparound on overflow. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_add_epi32 (__m512i __A, __m512i __B)
{
  return (__m512i) ((__v16su) __A + (__v16su) __B);
}

/* Merge-masked 32-bit add; unselected lanes come from __W. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_add_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                             (__v16si)_mm512_add_epi32(__A, __B),
                                             (__v16si)__W);
}

/* Zero-masked 32-bit add; unselected lanes are zeroed. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                             (__v16si)_mm512_add_epi32(__A, __B),
                                             (__v16si)_mm512_setzero_si512());
}

/* Lanewise 32-bit subtract (__A - __B) over 16 lanes. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sub_epi32 (__m512i __A, __m512i __B)
{
  return (__m512i) ((__v16su) __A - (__v16su) __B);
}

/* Merge-masked 32-bit subtract; unselected lanes come from __W. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_sub_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                             (__v16si)_mm512_sub_epi32(__A, __B),
                                             (__v16si)__W);
}

/* Zero-masked 32-bit subtract; unselected lanes are zeroed. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                             (__v16si)_mm512_sub_epi32(__A, __B),
                                             (__v16si)_mm512_setzero_si512());
}
| |
/* Packed double max with explicit rounding/SAE control R. These are macros
 * (not functions) because R must be a compile-time immediate. */
#define _mm512_max_round_pd(A, B, R) \
  ((__m512d)__builtin_ia32_maxpd512((__v8df)(__m512d)(A), \
                                    (__v8df)(__m512d)(B), (int)(R)))

/* Merge-masked rounding max; unselected lanes come from W. */
#define _mm512_mask_max_round_pd(W, U, A, B, R) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_max_round_pd((A), (B), (R)), \
                                        (__v8df)(W)))

/* Zero-masked rounding max; unselected lanes are zeroed. */
#define _mm512_maskz_max_round_pd(U, A, B, R) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_max_round_pd((A), (B), (R)), \
                                        (__v8df)_mm512_setzero_pd()))

/* Packed double max under the current rounding mode (VMAXPD). */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_max_pd(__m512d __A, __m512d __B)
{
  return (__m512d) __builtin_ia32_maxpd512((__v8df) __A, (__v8df) __B,
                                           _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masked max; unselected lanes come from __W. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_max_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
{
  return (__m512d)__builtin_ia32_selectpd_512(__U,
                                              (__v8df)_mm512_max_pd(__A, __B),
                                              (__v8df)__W);
}

/* Zero-masked max; unselected lanes are zeroed. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_max_pd (__mmask8 __U, __m512d __A, __m512d __B)
{
  return (__m512d)__builtin_ia32_selectpd_512(__U,
                                              (__v8df)_mm512_max_pd(__A, __B),
                                              (__v8df)_mm512_setzero_pd());
}
| |
/* Packed float max with explicit rounding/SAE control R (immediate). */
#define _mm512_max_round_ps(A, B, R) \
  ((__m512)__builtin_ia32_maxps512((__v16sf)(__m512)(A), \
                                   (__v16sf)(__m512)(B), (int)(R)))

/* Merge-masked rounding max; unselected lanes come from W. */
#define _mm512_mask_max_round_ps(W, U, A, B, R) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_max_round_ps((A), (B), (R)), \
                                       (__v16sf)(W)))

/* Zero-masked rounding max; unselected lanes are zeroed. */
#define _mm512_maskz_max_round_ps(U, A, B, R) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_max_round_ps((A), (B), (R)), \
                                       (__v16sf)_mm512_setzero_ps()))

/* Packed float max under the current rounding mode (VMAXPS). */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_max_ps(__m512 __A, __m512 __B)
{
  return (__m512) __builtin_ia32_maxps512((__v16sf) __A, (__v16sf) __B,
                                          _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masked max; unselected lanes come from __W. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_max_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
{
  return (__m512)__builtin_ia32_selectps_512(__U,
                                             (__v16sf)_mm512_max_ps(__A, __B),
                                             (__v16sf)__W);
}

/* Zero-masked max; unselected lanes are zeroed. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_max_ps (__mmask16 __U, __m512 __A, __m512 __B)
{
  return (__m512)__builtin_ia32_selectps_512(__U,
                                             (__v16sf)_mm512_max_ps(__A, __B),
                                             (__v16sf)_mm512_setzero_ps());
}
| |
/* Scalar single-precision max: element 0 is max(__A[0], __B[0]) if the low
 * mask bit is set, else taken from __W; upper elements pass through __A. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_max_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
                (__v4sf) __B,
                (__v4sf) __W,
                (__mmask8) __U,
                _MM_FROUND_CUR_DIRECTION);
}

/* As above but element 0 is zeroed when the low mask bit is clear. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
                (__v4sf) __B,
                (__v4sf) _mm_setzero_ps (),
                (__mmask8) __U,
                _MM_FROUND_CUR_DIRECTION);
}

/* Unmasked scalar max with rounding immediate R (mask -1 = all lanes). */
#define _mm_max_round_ss(A, B, R) \
  ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)-1, (int)(R)))

/* Merge-masked scalar max with rounding immediate R. */
#define _mm_mask_max_round_ss(W, U, A, B, R) \
  ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
                                           (int)(R)))

/* Zero-masked scalar max with rounding immediate R. */
#define _mm_maskz_max_round_ss(U, A, B, R) \
  ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)(U), (int)(R)))
| |
/* Scalar double max: element 0 is max(__A[0], __B[0]) if the low mask bit
 * is set, else taken from __W; the upper element passes through __A. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
                (__v2df) __B,
                (__v2df) __W,
                (__mmask8) __U,
                _MM_FROUND_CUR_DIRECTION);
}

/* As above but element 0 is zeroed when the low mask bit is clear. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
                (__v2df) __B,
                (__v2df) _mm_setzero_pd (),
                (__mmask8) __U,
                _MM_FROUND_CUR_DIRECTION);
}

/* Unmasked scalar double max with rounding immediate R. */
#define _mm_max_round_sd(A, B, R) \
  ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)-1, (int)(R)))

/* Merge-masked scalar double max with rounding immediate R. */
#define _mm_mask_max_round_sd(W, U, A, B, R) \
  ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)(__m128d)(W), \
                                            (__mmask8)(U), (int)(R)))

/* Zero-masked scalar double max with rounding immediate R. */
#define _mm_maskz_max_round_sd(U, A, B, R) \
  ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)(U), (int)(R)))
| |
/* Signed 32-bit lanewise max (VPMAXSD). */
static __inline __m512i
__DEFAULT_FN_ATTRS512
_mm512_max_epi32(__m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B);
}

/* Merge-masked signed 32-bit max; unselected lanes come from __W. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_max_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
                                             (__v16si)_mm512_max_epi32(__A, __B),
                                             (__v16si)__W);
}

/* Zero-masked signed 32-bit max; unselected lanes are zeroed. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_max_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
                                             (__v16si)_mm512_max_epi32(__A, __B),
                                             (__v16si)_mm512_setzero_si512());
}

/* Unsigned 32-bit lanewise max (VPMAXUD). */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu32(__m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B);
}

/* Merge-masked unsigned 32-bit max; unselected lanes come from __W. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_max_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
                                             (__v16si)_mm512_max_epu32(__A, __B),
                                             (__v16si)__W);
}

/* Zero-masked unsigned 32-bit max; unselected lanes are zeroed. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_max_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
                                             (__v16si)_mm512_max_epu32(__A, __B),
                                             (__v16si)_mm512_setzero_si512());
}
| |
/* Signed 64-bit lanewise max (VPMAXSQ). */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epi64(__m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B);
}

/* Merge-masked signed 64-bit max; unselected lanes come from __W. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_max_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
                                             (__v8di)_mm512_max_epi64(__A, __B),
                                             (__v8di)__W);
}

/* Zero-masked signed 64-bit max; unselected lanes are zeroed. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_max_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
                                             (__v8di)_mm512_max_epi64(__A, __B),
                                             (__v8di)_mm512_setzero_si512());
}

/* Unsigned 64-bit lanewise max (VPMAXUQ). */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu64(__m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_pmaxuq512((__v8di)__A, (__v8di)__B);
}

/* Merge-masked unsigned 64-bit max; unselected lanes come from __W. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_max_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
                                             (__v8di)_mm512_max_epu64(__A, __B),
                                             (__v8di)__W);
}

/* Zero-masked unsigned 64-bit max; unselected lanes are zeroed. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_max_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
                                             (__v8di)_mm512_max_epu64(__A, __B),
                                             (__v8di)_mm512_setzero_si512());
}
| |
/* Packed double min with explicit rounding/SAE control R (immediate). */
#define _mm512_min_round_pd(A, B, R) \
  ((__m512d)__builtin_ia32_minpd512((__v8df)(__m512d)(A), \
                                    (__v8df)(__m512d)(B), (int)(R)))

/* Merge-masked rounding min; unselected lanes come from W. */
#define _mm512_mask_min_round_pd(W, U, A, B, R) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_min_round_pd((A), (B), (R)), \
                                        (__v8df)(W)))

/* Zero-masked rounding min; unselected lanes are zeroed. */
#define _mm512_maskz_min_round_pd(U, A, B, R) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_min_round_pd((A), (B), (R)), \
                                        (__v8df)_mm512_setzero_pd()))

/* Packed double min under the current rounding mode (VMINPD). */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_min_pd(__m512d __A, __m512d __B)
{
  return (__m512d) __builtin_ia32_minpd512((__v8df) __A, (__v8df) __B,
                                           _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masked min; unselected lanes come from __W. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_min_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
{
  return (__m512d)__builtin_ia32_selectpd_512(__U,
                                              (__v8df)_mm512_min_pd(__A, __B),
                                              (__v8df)__W);
}

/* Zero-masked min; unselected lanes are zeroed. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_min_pd (__mmask8 __U, __m512d __A, __m512d __B)
{
  return (__m512d)__builtin_ia32_selectpd_512(__U,
                                              (__v8df)_mm512_min_pd(__A, __B),
                                              (__v8df)_mm512_setzero_pd());
}
| |
/* Packed float min with explicit rounding/SAE control R (immediate). */
#define _mm512_min_round_ps(A, B, R) \
  ((__m512)__builtin_ia32_minps512((__v16sf)(__m512)(A), \
                                   (__v16sf)(__m512)(B), (int)(R)))

/* Merge-masked rounding min; unselected lanes come from W. */
#define _mm512_mask_min_round_ps(W, U, A, B, R) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_min_round_ps((A), (B), (R)), \
                                       (__v16sf)(W)))

/* Zero-masked rounding min; unselected lanes are zeroed. */
#define _mm512_maskz_min_round_ps(U, A, B, R) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_min_round_ps((A), (B), (R)), \
                                       (__v16sf)_mm512_setzero_ps()))

/* Packed float min under the current rounding mode (VMINPS). */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_min_ps(__m512 __A, __m512 __B)
{
  return (__m512) __builtin_ia32_minps512((__v16sf) __A, (__v16sf) __B,
                                          _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masked min; unselected lanes come from __W. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_min_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
{
  return (__m512)__builtin_ia32_selectps_512(__U,
                                             (__v16sf)_mm512_min_ps(__A, __B),
                                             (__v16sf)__W);
}

/* Zero-masked min; unselected lanes are zeroed. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_min_ps (__mmask16 __U, __m512 __A, __m512 __B)
{
  return (__m512)__builtin_ia32_selectps_512(__U,
                                             (__v16sf)_mm512_min_ps(__A, __B),
                                             (__v16sf)_mm512_setzero_ps());
}
| |
/* Scalar single-precision min: element 0 is min(__A[0], __B[0]) if the low
 * mask bit is set, else taken from __W; upper elements pass through __A. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_min_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
                (__v4sf) __B,
                (__v4sf) __W,
                (__mmask8) __U,
                _MM_FROUND_CUR_DIRECTION);
}

/* As above but element 0 is zeroed when the low mask bit is clear. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
                (__v4sf) __B,
                (__v4sf) _mm_setzero_ps (),
                (__mmask8) __U,
                _MM_FROUND_CUR_DIRECTION);
}

/* Unmasked scalar min with rounding immediate R (mask -1 = all lanes). */
#define _mm_min_round_ss(A, B, R) \
  ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)-1, (int)(R)))

/* Merge-masked scalar min with rounding immediate R. */
#define _mm_mask_min_round_ss(W, U, A, B, R) \
  ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
                                           (int)(R)))

/* Zero-masked scalar min with rounding immediate R. */
#define _mm_maskz_min_round_ss(U, A, B, R) \
  ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)(U), (int)(R)))
| |
/* Scalar double min: element 0 is min(__A[0], __B[0]) if the low mask bit
 * is set, else taken from __W; the upper element passes through __A. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
                (__v2df) __B,
                (__v2df) __W,
                (__mmask8) __U,
                _MM_FROUND_CUR_DIRECTION);
}

/* As above but element 0 is zeroed when the low mask bit is clear. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
                (__v2df) __B,
                (__v2df) _mm_setzero_pd (),
                (__mmask8) __U,
                _MM_FROUND_CUR_DIRECTION);
}

/* Unmasked scalar double min with rounding immediate R. */
#define _mm_min_round_sd(A, B, R) \
  ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)-1, (int)(R)))

/* Merge-masked scalar double min with rounding immediate R. */
#define _mm_mask_min_round_sd(W, U, A, B, R) \
  ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)(__m128d)(W), \
                                            (__mmask8)(U), (int)(R)))

/* Zero-masked scalar double min with rounding immediate R. */
#define _mm_maskz_min_round_sd(U, A, B, R) \
  ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)(U), (int)(R)))
| |
/* Signed 32-bit lanewise min (VPMINSD). */
static __inline __m512i
__DEFAULT_FN_ATTRS512
_mm512_min_epi32(__m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B);
}

/* Merge-masked signed 32-bit min; unselected lanes come from __W. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_min_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
                                             (__v16si)_mm512_min_epi32(__A, __B),
                                             (__v16si)__W);
}

/* Zero-masked signed 32-bit min; unselected lanes are zeroed. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_min_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
                                             (__v16si)_mm512_min_epi32(__A, __B),
                                             (__v16si)_mm512_setzero_si512());
}

/* Unsigned 32-bit lanewise min (VPMINUD). */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu32(__m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B);
}

/* Merge-masked unsigned 32-bit min; unselected lanes come from __W. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_min_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
                                             (__v16si)_mm512_min_epu32(__A, __B),
                                             (__v16si)__W);
}

/* Zero-masked unsigned 32-bit min; unselected lanes are zeroed. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_min_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
                                             (__v16si)_mm512_min_epu32(__A, __B),
                                             (__v16si)_mm512_setzero_si512());
}
| |
/* Signed 64-bit lanewise min (VPMINSQ). */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epi64(__m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B);
}

/* Merge-masked signed 64-bit min; unselected lanes come from __W. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_min_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
                                             (__v8di)_mm512_min_epi64(__A, __B),
                                             (__v8di)__W);
}

/* Zero-masked signed 64-bit min; unselected lanes are zeroed. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_min_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
                                             (__v8di)_mm512_min_epi64(__A, __B),
                                             (__v8di)_mm512_setzero_si512());
}

/* Unsigned 64-bit lanewise min (VPMINUQ). */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu64(__m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_pminuq512((__v8di)__A, (__v8di)__B);
}

/* Merge-masked unsigned 64-bit min; unselected lanes come from __W. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_min_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
                                             (__v8di)_mm512_min_epu64(__A, __B),
                                             (__v8di)__W);
}

/* Zero-masked unsigned 64-bit min; unselected lanes are zeroed. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_min_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
                                             (__v8di)_mm512_min_epu64(__A, __B),
                                             (__v8di)_mm512_setzero_si512());
}
| |
/* Signed widening multiply (VPMULDQ): multiplies the even-indexed 32-bit
 * lanes of __X and __Y, producing 8 full 64-bit products. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mul_epi32(__m512i __X, __m512i __Y)
{
  return (__m512i)__builtin_ia32_pmuldq512((__v16si)__X, (__v16si) __Y);
}

/* Merge-masked widening multiply; unselected 64-bit lanes come from __W. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mul_epi32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
                                             (__v8di)_mm512_mul_epi32(__X, __Y),
                                             (__v8di)__W);
}

/* Zero-masked widening multiply; unselected 64-bit lanes are zeroed. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mul_epi32(__mmask8 __M, __m512i __X, __m512i __Y)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
                                             (__v8di)_mm512_mul_epi32(__X, __Y),
                                             (__v8di)_mm512_setzero_si512 ());
}

/* Unsigned widening multiply (VPMULUDQ) of the even-indexed 32-bit lanes. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mul_epu32(__m512i __X, __m512i __Y)
{
  return (__m512i)__builtin_ia32_pmuludq512((__v16si)__X, (__v16si)__Y);
}

/* Merge-masked unsigned widening multiply; unselected lanes come from __W. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mul_epu32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
                                             (__v8di)_mm512_mul_epu32(__X, __Y),
                                             (__v8di)__W);
}

/* Zero-masked unsigned widening multiply; unselected lanes are zeroed. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mul_epu32(__mmask8 __M, __m512i __X, __m512i __Y)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
                                             (__v8di)_mm512_mul_epu32(__X, __Y),
                                             (__v8di)_mm512_setzero_si512 ());
}
| |
/* 32-bit lanewise multiply keeping the low 32 bits of each product;
 * unsigned casts give well-defined wraparound. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mullo_epi32 (__m512i __A, __m512i __B)
{
  return (__m512i) ((__v16su) __A * (__v16su) __B);
}

/* Zero-masked low multiply; unselected lanes are zeroed.
 * (NOTE: maskz is defined before mask here, unlike sibling families.) */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mullo_epi32(__mmask16 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
                                             (__v16si)_mm512_mullo_epi32(__A, __B),
                                             (__v16si)_mm512_setzero_si512());
}

/* Merge-masked low multiply; unselected lanes come from __W. */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mullo_epi32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
                                             (__v16si)_mm512_mullo_epi32(__A, __B),
                                             (__v16si)__W);
}

/* 64-bit lanewise multiply keeping the low 64 bits of each product. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mullox_epi64 (__m512i __A, __m512i __B) {
  return (__m512i) ((__v8du) __A * (__v8du) __B);
}

/* Merge-masked 64-bit low multiply; unselected lanes come from __W. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mullox_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                             (__v8di)_mm512_mullox_epi64(__A, __B),
                                             (__v8di)__W);
}
| |
/* Packed double square root with rounding immediate R (VSQRTPD). */
#define _mm512_sqrt_round_pd(A, R) \
  ((__m512d)__builtin_ia32_sqrtpd512((__v8df)(__m512d)(A), (int)(R)))

/* Merge-masked rounding sqrt; unselected lanes come from W. */
#define _mm512_mask_sqrt_round_pd(W, U, A, R) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_sqrt_round_pd((A), (R)), \
                                        (__v8df)(__m512d)(W)))

/* Zero-masked rounding sqrt; unselected lanes are zeroed. */
#define _mm512_maskz_sqrt_round_pd(U, A, R) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_sqrt_round_pd((A), (R)), \
                                        (__v8df)_mm512_setzero_pd()))

/* Packed double square root under the current rounding mode. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_sqrt_pd(__m512d __A)
{
  return (__m512d)__builtin_ia32_sqrtpd512((__v8df)__A,
                                           _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masked sqrt; unselected lanes come from __W. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
  return (__m512d)__builtin_ia32_selectpd_512(__U,
                                              (__v8df)_mm512_sqrt_pd(__A),
                                              (__v8df)__W);
}

/* Zero-masked sqrt; unselected lanes are zeroed. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A)
{
  return (__m512d)__builtin_ia32_selectpd_512(__U,
                                              (__v8df)_mm512_sqrt_pd(__A),
                                              (__v8df)_mm512_setzero_pd());
}
| |
/* Packed float square root with rounding immediate R (VSQRTPS). */
#define _mm512_sqrt_round_ps(A, R) \
  ((__m512)__builtin_ia32_sqrtps512((__v16sf)(__m512)(A), (int)(R)))

/* Merge-masked rounding sqrt; unselected lanes come from W. */
#define _mm512_mask_sqrt_round_ps(W, U, A, R) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_sqrt_round_ps((A), (R)), \
                                       (__v16sf)(__m512)(W)))

/* Zero-masked rounding sqrt; unselected lanes are zeroed. */
#define _mm512_maskz_sqrt_round_ps(U, A, R) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_sqrt_round_ps((A), (R)), \
                                       (__v16sf)_mm512_setzero_ps()))

/* Packed float square root under the current rounding mode. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_sqrt_ps(__m512 __A)
{
  return (__m512)__builtin_ia32_sqrtps512((__v16sf)__A,
                                          _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masked sqrt; unselected lanes come from __W. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A)
{
  return (__m512)__builtin_ia32_selectps_512(__U,
                                             (__v16sf)_mm512_sqrt_ps(__A),
                                             (__v16sf)__W);
}

/* Zero-masked sqrt; unselected lanes are zeroed. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_sqrt_ps( __mmask16 __U, __m512 __A)
{
  return (__m512)__builtin_ia32_selectps_512(__U,
                                             (__v16sf)_mm512_sqrt_ps(__A),
                                             (__v16sf)_mm512_setzero_ps());
}
| |
/* Approximate reciprocal square root of packed doubles (VRSQRT14PD);
 * mask -1 selects all lanes, so the zero source is never used. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_rsqrt14_pd(__m512d __A)
{
  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
                 (__v8df)
                 _mm512_setzero_pd (),
                 (__mmask8) -1);}

/* Merge-masked approximate 1/sqrt; unselected lanes come from __W. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_rsqrt14_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
                  (__v8df) __W,
                  (__mmask8) __U);
}

/* Zero-masked approximate 1/sqrt; unselected lanes are zeroed. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_rsqrt14_pd (__mmask8 __U, __m512d __A)
{
  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
                 (__v8df)
                 _mm512_setzero_pd (),
                 (__mmask8) __U);
}

/* Approximate reciprocal square root of packed floats (VRSQRT14PS). */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_rsqrt14_ps(__m512 __A)
{
  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
                (__v16sf)
                _mm512_setzero_ps (),
                (__mmask16) -1);
}

/* Merge-masked approximate 1/sqrt; unselected lanes come from __W. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_rsqrt14_ps (__m512 __W, __mmask16 __U, __m512 __A)
{
  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
                 (__v16sf) __W,
                 (__mmask16) __U);
}

/* Zero-masked approximate 1/sqrt; unselected lanes are zeroed. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_rsqrt14_ps (__mmask16 __U, __m512 __A)
{
  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
                (__v16sf)
                _mm512_setzero_ps (),
                (__mmask16) __U);
}
| |
/* Scalar approximate 1/sqrt of __B[0]; upper elements pass through __A. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_rsqrt14_ss(__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
             (__v4sf) __B,
             (__v4sf)
             _mm_setzero_ps (),
             (__mmask8) -1);
}

/* Merge-masked scalar 1/sqrt; element 0 comes from __W when masked off. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
          (__v4sf) __B,
          (__v4sf) __W,
          (__mmask8) __U);
}

/* Zero-masked scalar 1/sqrt; element 0 is zeroed when masked off. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
          (__v4sf) __B,
          (__v4sf) _mm_setzero_ps (),
          (__mmask8) __U);
}

/* Scalar double approximate 1/sqrt of __B[0]; upper element from __A. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_rsqrt14_sd(__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
              (__v2df) __B,
              (__v2df)
              _mm_setzero_pd (),
              (__mmask8) -1);
}

/* Merge-masked scalar double 1/sqrt; element 0 from __W when masked off. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
 return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A,
          (__v2df) __B,
          (__v2df) __W,
          (__mmask8) __U);
}

/* Zero-masked scalar double 1/sqrt; element 0 zeroed when masked off. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B)
{
 return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A,
          (__v2df) __B,
          (__v2df) _mm_setzero_pd (),
          (__mmask8) __U);
}
| |
/* Approximate reciprocal of packed doubles (VRCP14PD); mask -1 selects
 * all lanes, so the zero source is never used. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_rcp14_pd(__m512d __A)
{
  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
               (__v8df)
               _mm512_setzero_pd (),
               (__mmask8) -1);
}

/* Merge-masked approximate reciprocal; unselected lanes come from __W. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_rcp14_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
                (__v8df) __W,
                (__mmask8) __U);
}

/* Zero-masked approximate reciprocal; unselected lanes are zeroed. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_rcp14_pd (__mmask8 __U, __m512d __A)
{
  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
               (__v8df)
               _mm512_setzero_pd (),
               (__mmask8) __U);
}

/* Approximate reciprocal of packed floats (VRCP14PS). */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_rcp14_ps(__m512 __A)
{
  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
              (__v16sf)
              _mm512_setzero_ps (),
              (__mmask16) -1);
}

/* Merge-masked approximate reciprocal; unselected lanes come from __W. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_rcp14_ps (__m512 __W, __mmask16 __U, __m512 __A)
{
  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
               (__v16sf) __W,
               (__mmask16) __U);
}

/* Zero-masked approximate reciprocal; unselected lanes are zeroed. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_rcp14_ps (__mmask16 __U, __m512 __A)
{
  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
              (__v16sf)
              _mm512_setzero_ps (),
              (__mmask16) __U);
}
| |
/* Scalar approximate reciprocal of __B[0]; upper elements pass through __A. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_rcp14_ss(__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
                 (__v4sf) __B,
                 (__v4sf)
                 _mm_setzero_ps (),
                 (__mmask8) -1);
}

/* Merge-masked scalar reciprocal; element 0 from __W when masked off. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
          (__v4sf) __B,
          (__v4sf) __W,
          (__mmask8) __U);
}

/* Zero-masked scalar reciprocal; element 0 zeroed when masked off. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
          (__v4sf) __B,
          (__v4sf) _mm_setzero_ps (),
          (__mmask8) __U);
}

/* Scalar double approximate reciprocal of __B[0]; upper element from __A. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_rcp14_sd(__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
            (__v2df) __B,
            (__v2df)
            _mm_setzero_pd (),
            (__mmask8) -1);
}

/* Merge-masked scalar double reciprocal; element 0 from __W when masked off. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
 return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A,
          (__v2df) __B,
          (__v2df) __W,
          (__mmask8) __U);
}

/* Zero-masked scalar double reciprocal; element 0 zeroed when masked off. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B)
{
 return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A,
          (__v2df) __B,
          (__v2df) _mm_setzero_pd (),
          (__mmask8) __U);
}
| |
/* Round packed floats toward -inf via VRNDSCALEPS with the FLOOR immediate.
 * Passing __A as the merge source with mask -1 makes this unmasked. */
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_floor_ps(__m512 __A)
{
  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
                                                  _MM_FROUND_FLOOR,
                                                  (__v16sf) __A, -1,
                                                  _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masked floor; unselected lanes come from __W. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_floor_ps (__m512 __W, __mmask16 __U, __m512 __A)
{
  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
                   _MM_FROUND_FLOOR,
                   (__v16sf) __W, __U,
                   _MM_FROUND_CUR_DIRECTION);
}

/* Round packed doubles toward -inf via VRNDSCALEPD (unmasked form). */
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_floor_pd(__m512d __A)
{
  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
                                                   _MM_FROUND_FLOOR,
                                                   (__v8df) __A, -1,
                                                   _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masked floor; unselected lanes come from __W. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_floor_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
                _MM_FROUND_FLOOR,
                (__v8df) __W, __U,
                _MM_FROUND_CUR_DIRECTION);
}
| |
/* Merge-masked round-toward-+inf (VRNDSCALEPS with the CEIL immediate);
 * unselected lanes come from __W. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_ceil_ps (__m512 __W, __mmask16 __U, __m512 __A)
{
  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
                  _MM_FROUND_CEIL,
                  (__v16sf) __W, __U,
                  _MM_FROUND_CUR_DIRECTION);
}

/* Round packed floats toward +inf; __A as merge source with mask -1
 * makes this the unmasked form. */
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_ceil_ps(__m512 __A)
{
  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
                                                  _MM_FROUND_CEIL,
                                                  (__v16sf) __A, -1,
                                                  _MM_FROUND_CUR_DIRECTION);
}

/* Round packed doubles toward +inf (unmasked VRNDSCALEPD form). */
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_ceil_pd(__m512d __A)
{
  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
                                                   _MM_FROUND_CEIL,
                                                   (__v8df) __A, -1,
                                                   _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masked ceil; unselected lanes come from __W. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
                _MM_FROUND_CEIL,
                (__v8df) __W, __U,
                _MM_FROUND_CUR_DIRECTION);
}
| |
/* Lanewise absolute value of 8 signed 64-bit integers (VPABSQ). */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi64(__m512i __A)
{
  return (__m512i)__builtin_ia32_pabsq512((__v8di)__A);
}

/* Merge-masked 64-bit abs; unselected lanes come from __W. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                             (__v8di)_mm512_abs_epi64(__A),
                                             (__v8di)__W);
}

/* Zero-masked 64-bit abs; unselected lanes are zeroed. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                             (__v8di)_mm512_abs_epi64(__A),
                                             (__v8di)_mm512_setzero_si512());
}

/* Lanewise absolute value of 16 signed 32-bit integers (VPABSD). */
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi32(__m512i __A)
{
  return (__m512i)__builtin_ia32_pabsd512((__v16si) __A);
}

/* Merge-masked 32-bit abs; unselected lanes come from __W. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
{
  return (__m512i)__builtin_ia32_selectd_512(__U,
                                             (__v16si)_mm512_abs_epi32(__A),
                                             (__v16si)__W);
}

/* Zero-masked 32-bit abs; unselected lanes are zeroed. */
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A)
{
  return (__m512i)__builtin_ia32_selectd_512(__U,
                                             (__v16si)_mm512_abs_epi32(__A),
                                             (__v16si)_mm512_setzero_si512());
}
| |
/* Scalar single-precision add, merge-masked: element 0 is __A+__B when
   bit 0 of __U is set, else __W[0]; upper elements come from __A. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_add_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
  __A = _mm_add_ss(__A, __B);
  return __builtin_ia32_selectss_128(__U, __A, __W);
}
| |
/* Scalar single-precision add, zero-masked: element 0 is __A+__B or 0.0
   depending on bit 0 of __U. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) {
  __A = _mm_add_ss(__A, __B);
  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
}
| |
/* Scalar single-precision add with an explicit rounding mode R.  The
   unmasked form passes an all-ones mask, so the zero passthrough vector
   is never used; the mask_/maskz_ forms merge with W or zero. */
#define _mm_add_round_ss(A, B, R) \
  ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)-1, (int)(R)))

#define _mm_mask_add_round_ss(W, U, A, B, R) \
  ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
                                           (int)(R)))

#define _mm_maskz_add_round_ss(U, A, B, R) \
  ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)(U), (int)(R)))
| |
/* Scalar double-precision add, merge-masked (see _mm_mask_add_ss). */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
  __A = _mm_add_sd(__A, __B);
  return __builtin_ia32_selectsd_128(__U, __A, __W);
}
| |
/* Scalar double-precision add, zero-masked. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) {
  __A = _mm_add_sd(__A, __B);
  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
}
/* Scalar double-precision add with explicit rounding mode R
   (unmasked / merge-masked / zero-masked variants). */
#define _mm_add_round_sd(A, B, R) \
  ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)-1, (int)(R)))

#define _mm_mask_add_round_sd(W, U, A, B, R) \
  ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)(__m128d)(W), \
                                            (__mmask8)(U), (int)(R)))

#define _mm_maskz_add_round_sd(U, A, B, R) \
  ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)(U), (int)(R)))
| |
/* Merge-masked packed double add: per-lane select between __A+__B and __W
   under mask __U. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_add_pd(__A, __B),
                                              (__v8df)__W);
}
| |
/* Zero-masked packed double add: lanes with a clear mask bit become 0.0. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_add_pd(__A, __B),
                                              (__v8df)_mm512_setzero_pd());
}
| |
/* Merge-masked packed float add. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_add_ps(__A, __B),
                                             (__v16sf)__W);
}
| |
/* Zero-masked packed float add. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_add_ps(__A, __B),
                                             (__v16sf)_mm512_setzero_ps());
}
| |
/* Packed 512-bit add with explicit rounding mode R.  The mask_/maskz_
   forms compute the rounded add, then per-lane select against W or
   zero. */
#define _mm512_add_round_pd(A, B, R) \
  ((__m512d)__builtin_ia32_addpd512((__v8df)(__m512d)(A), \
                                    (__v8df)(__m512d)(B), (int)(R)))

#define _mm512_mask_add_round_pd(W, U, A, B, R) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_add_round_pd((A), (B), (R)), \
                                        (__v8df)(__m512d)(W)))

#define _mm512_maskz_add_round_pd(U, A, B, R) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_add_round_pd((A), (B), (R)), \
                                        (__v8df)_mm512_setzero_pd()))

#define _mm512_add_round_ps(A, B, R) \
  ((__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \
                                   (__v16sf)(__m512)(B), (int)(R)))

#define _mm512_mask_add_round_ps(W, U, A, B, R) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
                                       (__v16sf)(__m512)(W)))

#define _mm512_maskz_add_round_ps(U, A, B, R) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
                                       (__v16sf)_mm512_setzero_ps()))
| |
/* Scalar single-precision subtract, merge-masked. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
  __A = _mm_sub_ss(__A, __B);
  return __builtin_ia32_selectss_128(__U, __A, __W);
}
| |
/* Scalar single-precision subtract, zero-masked. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) {
  __A = _mm_sub_ss(__A, __B);
  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
}
/* Scalar single-precision subtract with explicit rounding mode R
   (unmasked / merge-masked / zero-masked variants). */
#define _mm_sub_round_ss(A, B, R) \
  ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)-1, (int)(R)))

#define _mm_mask_sub_round_ss(W, U, A, B, R) \
  ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
                                           (int)(R)))

#define _mm_maskz_sub_round_ss(U, A, B, R) \
  ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)(U), (int)(R)))
| |
/* Scalar double-precision subtract, merge-masked. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
  __A = _mm_sub_sd(__A, __B);
  return __builtin_ia32_selectsd_128(__U, __A, __W);
}
| |
/* Scalar double-precision subtract, zero-masked. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) {
  __A = _mm_sub_sd(__A, __B);
  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
}
| |
/* Scalar double-precision subtract with explicit rounding mode R
   (unmasked / merge-masked / zero-masked variants). */
#define _mm_sub_round_sd(A, B, R) \
  ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)-1, (int)(R)))

#define _mm_mask_sub_round_sd(W, U, A, B, R) \
  ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)(__m128d)(W), \
                                            (__mmask8)(U), (int)(R)))

#define _mm_maskz_sub_round_sd(U, A, B, R) \
  ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)(U), (int)(R)))
| |
/* Merge-masked packed double subtract. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_sub_pd(__A, __B),
                                              (__v8df)__W);
}
| |
/* Zero-masked packed double subtract. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_sub_pd(__A, __B),
                                              (__v8df)_mm512_setzero_pd());
}
| |
/* Merge-masked packed float subtract. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_sub_ps(__A, __B),
                                             (__v16sf)__W);
}
| |
/* Zero-masked packed float subtract. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_sub_ps(__A, __B),
                                             (__v16sf)_mm512_setzero_ps());
}
| |
/* Packed 512-bit subtract with explicit rounding mode R; mask_/maskz_
   forms select against W or zero per lane. */
#define _mm512_sub_round_pd(A, B, R) \
  ((__m512d)__builtin_ia32_subpd512((__v8df)(__m512d)(A), \
                                    (__v8df)(__m512d)(B), (int)(R)))

#define _mm512_mask_sub_round_pd(W, U, A, B, R) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
                                        (__v8df)(__m512d)(W)))

#define _mm512_maskz_sub_round_pd(U, A, B, R) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
                                        (__v8df)_mm512_setzero_pd()))

#define _mm512_sub_round_ps(A, B, R) \
  ((__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \
                                   (__v16sf)(__m512)(B), (int)(R)))

#define _mm512_mask_sub_round_ps(W, U, A, B, R) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
                                       (__v16sf)(__m512)(W)))

#define _mm512_maskz_sub_round_ps(U, A, B, R) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
                                       (__v16sf)_mm512_setzero_ps()))
| |
/* Scalar single-precision multiply, merge-masked. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
  __A = _mm_mul_ss(__A, __B);
  return __builtin_ia32_selectss_128(__U, __A, __W);
}
| |
/* Scalar single-precision multiply, zero-masked. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_mul_ss(__mmask8 __U,__m128 __A, __m128 __B) {
  __A = _mm_mul_ss(__A, __B);
  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
}
/* Scalar single-precision multiply with explicit rounding mode R
   (unmasked / merge-masked / zero-masked variants). */
#define _mm_mul_round_ss(A, B, R) \
  ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)-1, (int)(R)))

#define _mm_mask_mul_round_ss(W, U, A, B, R) \
  ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
                                           (int)(R)))

#define _mm_maskz_mul_round_ss(U, A, B, R) \
  ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)(U), (int)(R)))
| |
/* Scalar double-precision multiply, merge-masked. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_mul_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
  __A = _mm_mul_sd(__A, __B);
  return __builtin_ia32_selectsd_128(__U, __A, __W);
}
| |
/* Scalar double-precision multiply, zero-masked. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) {
  __A = _mm_mul_sd(__A, __B);
  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
}
| |
/* Scalar double-precision multiply with explicit rounding mode R
   (unmasked / merge-masked / zero-masked variants). */
#define _mm_mul_round_sd(A, B, R) \
  ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)-1, (int)(R)))

#define _mm_mask_mul_round_sd(W, U, A, B, R) \
  ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)(__m128d)(W), \
                                            (__mmask8)(U), (int)(R)))

#define _mm_maskz_mul_round_sd(U, A, B, R) \
  ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)(U), (int)(R)))
| |
/* Merge-masked packed double multiply. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_mul_pd(__A, __B),
                                              (__v8df)__W);
}
| |
/* Zero-masked packed double multiply. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_mul_pd(__A, __B),
                                              (__v8df)_mm512_setzero_pd());
}
| |
/* Merge-masked packed float multiply. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_mul_ps(__A, __B),
                                             (__v16sf)__W);
}
| |
/* Zero-masked packed float multiply. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_mul_ps(__A, __B),
                                             (__v16sf)_mm512_setzero_ps());
}
| |
/* Packed 512-bit multiply with explicit rounding mode R; mask_/maskz_
   forms select against W or zero per lane. */
#define _mm512_mul_round_pd(A, B, R) \
  ((__m512d)__builtin_ia32_mulpd512((__v8df)(__m512d)(A), \
                                    (__v8df)(__m512d)(B), (int)(R)))

#define _mm512_mask_mul_round_pd(W, U, A, B, R) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
                                        (__v8df)(__m512d)(W)))

#define _mm512_maskz_mul_round_pd(U, A, B, R) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
                                        (__v8df)_mm512_setzero_pd()))

#define _mm512_mul_round_ps(A, B, R) \
  ((__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \
                                   (__v16sf)(__m512)(B), (int)(R)))

#define _mm512_mask_mul_round_ps(W, U, A, B, R) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
                                       (__v16sf)(__m512)(W)))

#define _mm512_maskz_mul_round_ps(U, A, B, R) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
                                       (__v16sf)_mm512_setzero_ps()))
| |
/* Scalar single-precision divide, merge-masked. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
  __A = _mm_div_ss(__A, __B);
  return __builtin_ia32_selectss_128(__U, __A, __W);
}
| |
/* Scalar single-precision divide, zero-masked. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_div_ss(__mmask8 __U,__m128 __A, __m128 __B) {
  __A = _mm_div_ss(__A, __B);
  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
}
| |
/* Scalar single-precision divide with explicit rounding mode R
   (unmasked / merge-masked / zero-masked variants). */
#define _mm_div_round_ss(A, B, R) \
  ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)-1, (int)(R)))

#define _mm_mask_div_round_ss(W, U, A, B, R) \
  ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
                                           (int)(R)))

#define _mm_maskz_div_round_ss(U, A, B, R) \
  ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)(U), (int)(R)))
| |
/* Scalar double-precision divide, merge-masked. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_div_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
  __A = _mm_div_sd(__A, __B);
  return __builtin_ia32_selectsd_128(__U, __A, __W);
}
| |
/* Scalar double-precision divide, zero-masked. */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) {
  __A = _mm_div_sd(__A, __B);
  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
}
| |
/* Scalar double-precision divide with explicit rounding mode R
   (unmasked / merge-masked / zero-masked variants). */
#define _mm_div_round_sd(A, B, R) \
  ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)-1, (int)(R)))

#define _mm_mask_div_round_sd(W, U, A, B, R) \
  ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)(__m128d)(W), \
                                            (__mmask8)(U), (int)(R)))

#define _mm_maskz_div_round_sd(U, A, B, R) \
  ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)(U), (int)(R)))
| |
/* Packed double divide, expressed as plain vector division so the
   compiler can fold/optimize it like ordinary arithmetic. */
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_div_pd(__m512d __a, __m512d __b)
{
  return (__m512d)((__v8df)__a/(__v8df)__b);
}
| |
/* Merge-masked packed double divide. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_div_pd(__A, __B),
                                              (__v8df)__W);
}
| |
/* Zero-masked packed double divide. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_div_pd(__A, __B),
                                              (__v8df)_mm512_setzero_pd());
}
| |
/* Packed float divide, expressed as plain vector division. */
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_div_ps(__m512 __a, __m512 __b)
{
  return (__m512)((__v16sf)__a/(__v16sf)__b);
}
| |
/* Merge-masked packed float divide. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_div_ps(__A, __B),
                                             (__v16sf)__W);
}
| |
/* Zero-masked packed float divide. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_div_ps(__A, __B),
                                             (__v16sf)_mm512_setzero_ps());
}
| |
/* Packed 512-bit divide with explicit rounding mode R; mask_/maskz_
   forms select against W or zero per lane. */
#define _mm512_div_round_pd(A, B, R) \
  ((__m512d)__builtin_ia32_divpd512((__v8df)(__m512d)(A), \
                                    (__v8df)(__m512d)(B), (int)(R)))

#define _mm512_mask_div_round_pd(W, U, A, B, R) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_div_round_pd((A), (B), (R)), \
                                        (__v8df)(__m512d)(W)))

#define _mm512_maskz_div_round_pd(U, A, B, R) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_div_round_pd((A), (B), (R)), \
                                        (__v8df)_mm512_setzero_pd()))

#define _mm512_div_round_ps(A, B, R) \
  ((__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \
                                   (__v16sf)(__m512)(B), (int)(R)))

#define _mm512_mask_div_round_ps(W, U, A, B, R) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
                                       (__v16sf)(__m512)(W)))

#define _mm512_maskz_div_round_ps(U, A, B, R) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
                                       (__v16sf)_mm512_setzero_ps()))
| |
/* VRNDSCALEPS wrappers: round packed floats per the immediate (rounding
   mode in the low bits, scale in the upper bits).  Plain forms use
   _MM_FROUND_CUR_DIRECTION as the SAE/rounding argument; the *_round_*
   forms take it explicitly as R.  mask_/maskz_ forms merge with the
   passthrough operand (A) or zero under the given mask. */
#define _mm512_roundscale_ps(A, B) \
  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
                                          (__v16sf)_mm512_undefined_ps(), \
                                          (__mmask16)-1, \
                                          _MM_FROUND_CUR_DIRECTION))

#define _mm512_mask_roundscale_ps(A, B, C, imm) \
  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
                                          (__v16sf)(__m512)(A), (__mmask16)(B), \
                                          _MM_FROUND_CUR_DIRECTION))

#define _mm512_maskz_roundscale_ps(A, B, imm) \
  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
                                          (__v16sf)_mm512_setzero_ps(), \
                                          (__mmask16)(A), \
                                          _MM_FROUND_CUR_DIRECTION))

#define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) \
  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
                                          (__v16sf)(__m512)(A), (__mmask16)(B), \
                                          (int)(R)))

#define _mm512_maskz_roundscale_round_ps(A, B, imm, R) \
  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
                                          (__v16sf)_mm512_setzero_ps(), \
                                          (__mmask16)(A), (int)(R)))

#define _mm512_roundscale_round_ps(A, imm, R) \
  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \
                                          (__v16sf)_mm512_undefined_ps(), \
                                          (__mmask16)-1, (int)(R)))
| |
/* VRNDSCALEPD wrappers: double-precision counterparts of the
   _mm512_roundscale_ps family above. */
#define _mm512_roundscale_pd(A, B) \
  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
                                           (__v8df)_mm512_undefined_pd(), \
                                           (__mmask8)-1, \
                                           _MM_FROUND_CUR_DIRECTION))

#define _mm512_mask_roundscale_pd(A, B, C, imm) \
  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
                                           (__v8df)(__m512d)(A), (__mmask8)(B), \
                                           _MM_FROUND_CUR_DIRECTION))

#define _mm512_maskz_roundscale_pd(A, B, imm) \
  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
                                           (__v8df)_mm512_setzero_pd(), \
                                           (__mmask8)(A), \
                                           _MM_FROUND_CUR_DIRECTION))

#define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) \
  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
                                           (__v8df)(__m512d)(A), (__mmask8)(B), \
                                           (int)(R)))

#define _mm512_maskz_roundscale_round_pd(A, B, imm, R) \
  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
                                           (__v8df)_mm512_setzero_pd(), \
                                           (__mmask8)(A), (int)(R)))

#define _mm512_roundscale_round_pd(A, imm, R) \
  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \
                                           (__v8df)_mm512_undefined_pd(), \
                                           (__mmask8)-1, (int)(R)))
| |
/* Packed double fused multiply-add family with explicit rounding mode R.
   All variants are built on the fmadd builtins; the other operations are
   derived by negating operands: fmsub negates C (A*B - C), fnmadd negates
   A (-(A*B) + C), fnmsub negates A and C (-(A*B) - C).  _mask merges with
   the first operand, _mask3 merges with the third, _maskz zeroes. */
#define _mm512_fmadd_round_pd(A, B, C, R) \
  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
                                            (__v8df)(__m512d)(B), \
                                            (__v8df)(__m512d)(C), \
                                            (__mmask8)-1, (int)(R)))


#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) \
  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
                                            (__v8df)(__m512d)(B), \
                                            (__v8df)(__m512d)(C), \
                                            (__mmask8)(U), (int)(R)))


#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) \
  ((__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
                                             (__v8df)(__m512d)(B), \
                                             (__v8df)(__m512d)(C), \
                                             (__mmask8)(U), (int)(R)))


#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) \
  ((__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
                                             (__v8df)(__m512d)(B), \
                                             (__v8df)(__m512d)(C), \
                                             (__mmask8)(U), (int)(R)))


#define _mm512_fmsub_round_pd(A, B, C, R) \
  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
                                            (__v8df)(__m512d)(B), \
                                            -(__v8df)(__m512d)(C), \
                                            (__mmask8)-1, (int)(R)))


#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) \
  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
                                            (__v8df)(__m512d)(B), \
                                            -(__v8df)(__m512d)(C), \
                                            (__mmask8)(U), (int)(R)))


#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) \
  ((__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
                                             (__v8df)(__m512d)(B), \
                                             -(__v8df)(__m512d)(C), \
                                             (__mmask8)(U), (int)(R)))


#define _mm512_fnmadd_round_pd(A, B, C, R) \
  ((__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
                                            (__v8df)(__m512d)(B), \
                                            (__v8df)(__m512d)(C), \
                                            (__mmask8)-1, (int)(R)))


#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) \
  ((__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
                                             (__v8df)(__m512d)(B), \
                                             (__v8df)(__m512d)(C), \
                                             (__mmask8)(U), (int)(R)))


#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) \
  ((__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
                                             (__v8df)(__m512d)(B), \
                                             (__v8df)(__m512d)(C), \
                                             (__mmask8)(U), (int)(R)))


#define _mm512_fnmsub_round_pd(A, B, C, R) \
  ((__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
                                            (__v8df)(__m512d)(B), \
                                            -(__v8df)(__m512d)(C), \
                                            (__mmask8)-1, (int)(R)))


#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) \
  ((__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
                                             (__v8df)(__m512d)(B), \
                                             -(__v8df)(__m512d)(C), \
                                             (__mmask8)(U), (int)(R)))
| |
| |
/* Packed double FMA: __A*__B + __C per lane, current rounding mode. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C)
{
  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
                                                    (__v8df) __B,
                                                    (__v8df) __C,
                                                    (__mmask8) -1,
                                                    _MM_FROUND_CUR_DIRECTION);
}
| |
/* Merge-masked FMA: result lanes under __U, else __A (first operand). */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
{
  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
                                                    (__v8df) __B,
                                                    (__v8df) __C,
                                                    (__mmask8) __U,
                                                    _MM_FROUND_CUR_DIRECTION);
}
| |
/* Merge-masked FMA, mask3 form: unselected lanes come from __C. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
{
  return (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) __A,
                                                     (__v8df) __B,
                                                     (__v8df) __C,
                                                     (__mmask8) __U,
                                                     _MM_FROUND_CUR_DIRECTION);
}
| |
/* Zero-masked FMA: unselected lanes are zeroed. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
  return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
                                                     (__v8df) __B,
                                                     (__v8df) __C,
                                                     (__mmask8) __U,
                                                     _MM_FROUND_CUR_DIRECTION);
}
| |
/* __A*__B - __C, via fmadd with __C negated. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C)
{
  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
                                                    (__v8df) __B,
                                                    -(__v8df) __C,
                                                    (__mmask8) -1,
                                                    _MM_FROUND_CUR_DIRECTION);
}
| |
/* Merge-masked __A*__B - __C; unselected lanes keep __A. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
{
  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
                                                    (__v8df) __B,
                                                    -(__v8df) __C,
                                                    (__mmask8) __U,
                                                    _MM_FROUND_CUR_DIRECTION);
}
| |
/* Zero-masked __A*__B - __C. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
  return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
                                                     (__v8df) __B,
                                                     -(__v8df) __C,
                                                     (__mmask8) __U,
                                                     _MM_FROUND_CUR_DIRECTION);
}
| |
/* -(__A*__B) + __C, via fmadd with one multiplicand negated. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C)
{
  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
                                                    -(__v8df) __B,
                                                    (__v8df) __C,
                                                    (__mmask8) -1,
                                                    _MM_FROUND_CUR_DIRECTION);
}
| |
/* Merge-masked -(__A*__B) + __C; unselected lanes keep __C (mask3). */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
{
  return (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) __A,
                                                     (__v8df) __B,
                                                     (__v8df) __C,
                                                     (__mmask8) __U,
                                                     _MM_FROUND_CUR_DIRECTION);
}
| |
/* Zero-masked -(__A*__B) + __C. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
  return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
                                                     (__v8df) __B,
                                                     (__v8df) __C,
                                                     (__mmask8) __U,
                                                     _MM_FROUND_CUR_DIRECTION);
}
| |
/* -(__A*__B) - __C, via fmadd with __B and __C negated. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C)
{
  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
                                                    -(__v8df) __B,
                                                    -(__v8df) __C,
                                                    (__mmask8) -1,
                                                    _MM_FROUND_CUR_DIRECTION);
}
| |
/* Zero-masked -(__A*__B) - __C. */
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
  return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
                                                     (__v8df) __B,
                                                     -(__v8df) __C,
                                                     (__mmask8) __U,
                                                     _MM_FROUND_CUR_DIRECTION);
}
| |
/* Packed float fused multiply-add family with explicit rounding mode R;
   single-precision counterparts of the _round_pd FMA macros above (same
   operand-negation scheme for fmsub/fnmadd/fnmsub). */
#define _mm512_fmadd_round_ps(A, B, C, R) \
  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
                                           (__v16sf)(__m512)(B), \
                                           (__v16sf)(__m512)(C), \
                                           (__mmask16)-1, (int)(R)))


#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) \
  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
                                           (__v16sf)(__m512)(B), \
                                           (__v16sf)(__m512)(C), \
                                           (__mmask16)(U), (int)(R)))


#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) \
  ((__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
                                            (__v16sf)(__m512)(B), \
                                            (__v16sf)(__m512)(C), \
                                            (__mmask16)(U), (int)(R)))


#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) \
  ((__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
                                            (__v16sf)(__m512)(B), \
                                            (__v16sf)(__m512)(C), \
                                            (__mmask16)(U), (int)(R)))


#define _mm512_fmsub_round_ps(A, B, C, R) \
  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
                                           (__v16sf)(__m512)(B), \
                                           -(__v16sf)(__m512)(C), \
                                           (__mmask16)-1, (int)(R)))


#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) \
  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
                                           (__v16sf)(__m512)(B), \
                                           -(__v16sf)(__m512)(C), \
                                           (__mmask16)(U), (int)(R)))


#define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) \
  ((__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
                                            (__v16sf)(__m512)(B), \
                                            -(__v16sf)(__m512)(C), \
                                            (__mmask16)(U), (int)(R)))


#define _mm512_fnmadd_round_ps(A, B, C, R) \
  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
                                           -(__v16sf)(__m512)(B), \
                                           (__v16sf)(__m512)(C), \
                                           (__mmask16)-1, (int)(R)))


#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) \
  ((__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
                                            (__v16sf)(__m512)(B), \
                                            (__v16sf)(__m512)(C), \
                                            (__mmask16)(U), (int)(R)))


#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) \
  ((__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
                                            (__v16sf)(__m512)(B), \
                                            (__v16sf)(__m512)(C), \
                                            (__mmask16)(U), (int)(R)))


#define _mm512_fnmsub_round_ps(A, B, C, R) \
  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
                                           -(__v16sf)(__m512)(B), \
                                           -(__v16sf)(__m512)(C), \
                                           (__mmask16)-1, (int)(R)))


#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) \
  ((__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
                                            (__v16sf)(__m512)(B), \
                                            -(__v16sf)(__m512)(C), \
                                            (__mmask16)(U), (int)(R)))
| |
| |
/* Packed float FMA: __A*__B + __C per lane, current rounding mode. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C)
{
  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
                                                   (__v16sf) __B,
                                                   (__v16sf) __C,
                                                   (__mmask16) -1,
                                                   _MM_FROUND_CUR_DIRECTION);
}
| |
/* Merge-masked float FMA: result lanes under __U, else __A. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
{
  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
                                                   (__v16sf) __B,
                                                   (__v16sf) __C,
                                                   (__mmask16) __U,
                                                   _MM_FROUND_CUR_DIRECTION);
}
| |
/* Merge-masked float FMA, mask3 form: unselected lanes come from __C. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
{
  return (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) __A,
                                                    (__v16sf) __B,
                                                    (__v16sf) __C,
                                                    (__mmask16) __U,
                                                    _MM_FROUND_CUR_DIRECTION);
}
| |
/* Zero-masked float FMA. */
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
{
  return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
                                                    (__v16sf) __B,
                                                    (__v16sf) __C,
                                                    (__mmask16) __U,
                                                    _MM_FROUND_CUR_DIRECTION);
}
| |
| static __inline__ __m512 __DEFAULT_FN_ATTRS512 |
| _mm512_fmsub_ps(__m512 __A, |