/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __ARM_ACLE_H
#define __ARM_ACLE_H

#ifndef __ARM_ACLE
#error "ACLE intrinsics support not enabled."
#endif

#include <stdint.h>

#if defined(__cplusplus)
extern "C" {
#endif

/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
/* 8.3 Memory barriers */
#if !defined(_MSC_VER)
#define __dmb(i) __builtin_arm_dmb(i)
#define __dsb(i) __builtin_arm_dsb(i)
#define __isb(i) __builtin_arm_isb(i)
#endif
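
/*
 * Example (illustrative, not part of the header): the argument selects the
 * barrier domain/type; 0xF encodes SY, the full-system barrier.
 *
 *   __dmb(0xF);   // DMB SY: order all loads and stores, full system
 *   __dsb(0xF);   // DSB SY: complete all memory accesses before continuing
 *   __isb(0xF);   // ISB SY: flush the pipeline and refetch instructions
 */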

/* 8.4 Hints */

#if !defined(_MSC_VER)
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__wfi(void) {
  __builtin_arm_wfi();
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
__wfe(void) {
  __builtin_arm_wfe();
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
__sev(void) {
  __builtin_arm_sev();
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
__sevl(void) {
  __builtin_arm_sevl();
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
__yield(void) {
  __builtin_arm_yield();
}
#endif
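
/*
 * Example (illustrative): a low-power wait loop. One thread waits with
 * __wfe() until a flag changes; the thread that updates the flag calls
 * __sev() after its store so waiters wake up and re-check the condition.
 * 'flag' is a hypothetical shared variable, not part of this header.
 *
 *   while (!flag)
 *     __wfe();
 */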

#if __ARM_32BIT_STATE
#define __dbg(t) __builtin_arm_dbg(t)
#endif

/* 8.5 Swap */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__swp(uint32_t __x, volatile uint32_t *__p) {
  uint32_t __v;
  /* Retry the exclusive load/store pair until the store succeeds, i.e.
     until no other observer has touched *__p since the LDREX. */
  do
    __v = __builtin_arm_ldrex(__p);
  while (__builtin_arm_strex(__x, __p));
  return __v;
}
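
/*
 * Example (illustrative): atomically exchange a new value into a shared
 * word and observe the previous one. 'lock' is a hypothetical variable.
 *
 *   volatile uint32_t lock;
 *   uint32_t old = __swp(1, &lock);   // returns the prior contents of lock
 */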

/* 8.6 Memory prefetch intrinsics */
/* 8.6.1 Data prefetch */
#define __pld(addr) __pldx(0, 0, 0, addr)

#if __ARM_32BIT_STATE
#define __pldx(access_kind, cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, access_kind, 1)
#else
#define __pldx(access_kind, cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
#endif

/* 8.6.2 Instruction prefetch */
#define __pli(addr) __plix(0, 0, addr)

#if __ARM_32BIT_STATE
#define __plix(cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, 0, 0)
#else
#define __plix(cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
#endif
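
/*
 * Example (illustrative): per ACLE, access_kind is 0 for read / 1 for write,
 * cache_level 0 selects L1, and retention_policy 0 is the normal (temporal)
 * policy.
 *
 *   __pld(p);            // same as __pldx(0, 0, 0, p): read prefetch
 *   __pldx(1, 0, 0, p);  // prefetch *p for writing, into L1, temporal
 */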

/* 8.7 NOP */
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__nop(void) {
  __builtin_arm_nop();
}

/* 9 DATA-PROCESSING INTRINSICS */
/* 9.2 Miscellaneous data-processing intrinsics */
/* ROR */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__ror(uint32_t __x, uint32_t __y) {
  __y %= 32;
  if (__y == 0) /* Avoid the undefined left shift by 32 below. */
    return __x;
  return (__x >> __y) | (__x << (32 - __y));
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rorll(uint64_t __x, uint32_t __y) {
  __y %= 64;
  if (__y == 0)
    return __x;
  return (__x >> __y) | (__x << (64 - __y));
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rorl(unsigned long __x, uint32_t __y) {
#if __SIZEOF_LONG__ == 4
  return __ror(__x, __y);
#else
  return __rorll(__x, __y);
#endif
}
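
/*
 * Example (illustrative): rotating right by 1 moves the low bit to the top.
 *
 *   __ror(0x80000001u, 1);   // == 0xC0000000
 *   __ror(0x12345678u, 32);  // == 0x12345678 (rotation count is mod 32)
 */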

/* CLZ */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__clz(uint32_t __t) {
  /* ACLE defines __clz(0) to return the argument width; __builtin_clz is
     undefined for a zero argument, so guard it explicitly. */
  if (__t == 0)
    return 32;
  return __builtin_clz(__t);
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__clzl(unsigned long __t) {
  if (__t == 0)
    return __SIZEOF_LONG__ * 8;
  return __builtin_clzl(__t);
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__clzll(uint64_t __t) {
  if (__t == 0)
    return 64;
  return __builtin_clzll(__t);
}
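
/*
 * Example (illustrative):
 *
 *   __clz(1u);   // == 31: bit 0 set, 31 leading zeros
 *   __clz(0u);   // == 32, per ACLE
 */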

/* REV */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rev(uint32_t __t) {
  return __builtin_bswap32(__t);
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__revl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __builtin_bswap32(__t);
#else
  return __builtin_bswap64(__t);
#endif
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__revll(uint64_t __t) {
  return __builtin_bswap64(__t);
}

/* REV16 */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rev16(uint32_t __t) {
  return __ror(__rev(__t), 16);
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rev16ll(uint64_t __t) {
  /* REV16 swaps the bytes within each 16-bit halfword, so apply the 32-bit
     version to each half independently. */
  return (((uint64_t)__rev16(__t >> 32)) << 32) |
         (uint64_t)__rev16((uint32_t)__t);
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rev16l(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __rev16(__t);
#else
  return __rev16ll(__t);
#endif
}
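
/*
 * Example (illustrative): REV reverses all four bytes; REV16 reverses the
 * bytes within each halfword.
 *
 *   __rev(0x12345678u);    // == 0x78563412
 *   __rev16(0x12345678u);  // == 0x34127856
 */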

/* REVSH */
static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
__revsh(int16_t __t) {
  return __builtin_bswap16(__t);
}

/* RBIT */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rbit(uint32_t __t) {
  return __builtin_arm_rbit(__t);
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rbitll(uint64_t __t) {
#if __ARM_32BIT_STATE
  /* AArch32 has no 64-bit RBIT: bit-reverse each half and swap the halves. */
  return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
         __builtin_arm_rbit(__t >> 32);
#else
  return __builtin_arm_rbit64(__t);
#endif
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rbitl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __rbit(__t);
#else
  return __rbitll(__t);
#endif
}
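
/*
 * Example (illustrative): RBIT mirrors the bit order.
 *
 *   __rbit(0x00000001u);  // == 0x80000000
 *   __rbit(0xF0000000u);  // == 0x0000000F
 */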

/*
 * 9.3 16-bit multiplications
 *
 * The b/t suffixes select the bottom or top signed halfword of each operand,
 * so e.g. __smulbt multiplies the bottom half of __a by the top half of __b.
 * __smulwb/__smulwt multiply all of __a by a halfword of __b and return the
 * high 32 bits of the 48-bit product.
 */
#if __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
__smulbb(int32_t __a, int32_t __b) {
  return __builtin_arm_smulbb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
__smulbt(int32_t __a, int32_t __b) {
  return __builtin_arm_smulbt(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
__smultb(int32_t __a, int32_t __b) {
  return __builtin_arm_smultb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
__smultt(int32_t __a, int32_t __b) {
  return __builtin_arm_smultt(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
__smulwb(int32_t __a, int32_t __b) {
  return __builtin_arm_smulwb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
__smulwt(int32_t __a, int32_t __b) {
  return __builtin_arm_smulwt(__a, __b);
}
#endif
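
/*
 * Example (illustrative): with __a = 0x00030002 and __b = 0x00050004, the
 * bottom halves are 2 and 4 and the top halves are 3 and 5:
 *
 *   __smulbb(0x00030002, 0x00050004);  // == 2 * 4 == 8
 *   __smultt(0x00030002, 0x00050004);  // == 3 * 5 == 15
 */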

/*
 * 9.4 Saturating intrinsics
 *
 * FIXME: Change guard to their corresponding __ARM_FEATURE flag when Q flag
 * intrinsics are implemented and the flag is enabled.
 */
/* 9.4.1 Width-specified saturation intrinsics */
#if __ARM_FEATURE_SAT
#define __ssat(x, y) __builtin_arm_ssat(x, y)
#define __usat(x, y) __builtin_arm_usat(x, y)
#endif

/* 9.4.2 Saturating addition and subtraction intrinsics */
#if __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__qadd(int32_t __t, int32_t __v) {
  return __builtin_arm_qadd(__t, __v);
}

static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__qsub(int32_t __t, int32_t __v) {
  return __builtin_arm_qsub(__t, __v);
}

static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__qdbl(int32_t __t) {
  return __builtin_arm_qadd(__t, __t);
}
#endif
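
/*
 * Example (illustrative): results clamp to the int32_t range instead of
 * wrapping, and the Q (sticky saturation) flag is set when they do.
 *
 *   __qadd(INT32_MAX, 1);   // == INT32_MAX, saturated instead of wrapping
 *   __qsub(INT32_MIN, 1);   // == INT32_MIN, saturated
 */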

/* 9.4.3 Accumulating multiplications */
#if __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlabb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlabb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlabt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlabt(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlatb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlatb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlatt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlatt(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlawb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlawb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlawt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlawt(__a, __b, __c);
}
#endif
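
/*
 * Example (illustrative): multiply the selected halfwords, then add the
 * accumulator; the Q flag is set if the accumulate overflows.
 *
 *   __smlabb(0x00000002, 0x00000003, 10);  // == 2 * 3 + 10 == 16
 */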

/* 9.5.4 Parallel 16-bit saturation */
#if __ARM_FEATURE_SIMD32
#define __ssat16(x, y) __builtin_arm_ssat16(x, y)
#define __usat16(x, y) __builtin_arm_usat16(x, y)
#endif

/* 9.5.5 Packing and unpacking */
#if __ARM_FEATURE_SIMD32
/* Vector-in-scalar types: four 8-bit or two 16-bit lanes packed into one
   32-bit word, with lane 0 in the least significant bits. */
typedef int32_t int8x4_t;
typedef int32_t int16x2_t;
typedef uint32_t uint8x4_t;
typedef uint32_t uint16x2_t;

static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sxtab16(int16x2_t __a, int8x4_t __b) {
  return __builtin_arm_sxtab16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sxtb16(int8x4_t __a) {
  return __builtin_arm_sxtb16(__a);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__uxtab16(int16x2_t __a, int8x4_t __b) {
  return __builtin_arm_uxtab16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__uxtb16(int8x4_t __a) {
  return __builtin_arm_uxtb16(__a);
}
#endif
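
/*
 * Example (illustrative): __sxtb16 sign-extends bytes 0 and 2 into the two
 * halfword lanes.
 *
 *   __sxtb16(0x7F80FF01);  // == 0xFF800001 (0x01 -> 0x0001, 0x80 -> 0xFF80)
 */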

/* 9.5.6 Parallel selection */
#if __ARM_FEATURE_SIMD32
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__sel(uint8x4_t __a, uint8x4_t __b) {
  /* Picks each byte from __a or __b according to the APSR.GE flags set by a
     preceding parallel add/subtract. */
  return __builtin_arm_sel(__a, __b);
}
#endif

/* 9.5.7 Parallel 8-bit addition and subtraction */
#if __ARM_FEATURE_SIMD32
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__qadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_qadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__qsub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_qsub8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__sadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_sadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__shadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_shadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__shsub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_shsub8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__ssub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_ssub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uhadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uhadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uhsub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uhsub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uqadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uqadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uqsub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uqsub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__usub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_usub8(__a, __b);
}
#endif
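
/*
 * Example (illustrative): __uadd8 adds the four byte lanes independently;
 * carries do not propagate across lanes.
 *
 *   __uadd8(0x01020304, 0x01010101);   // == 0x02030405
 *   __uqadd8(0xFF000000, 0x02000000);  // == 0xFF000000 (top lane saturates)
 */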

/* 9.5.8 Sum of 8-bit absolute differences */
#if __ARM_FEATURE_SIMD32
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__usad8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_usad8(__a, __b);
}
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) {
  return __builtin_arm_usada8(__a, __b, __c);
}
#endif
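
/*
 * Example (illustrative): sum of the absolute byte-wise differences.
 *
 *   __usad8(0x01020304, 0x04030201);
 *   // == |1-4| + |2-3| + |3-2| + |4-1| == 8
 */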

/* 9.5.9 Parallel 16-bit addition and subtraction */
/* The asx/sax forms exchange the halfwords of the second operand before
   operating: asx adds in the high lane and subtracts in the low lane;
   sax does the opposite. */
#if __ARM_FEATURE_SIMD32
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qsax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qsax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qsub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qsub16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_sadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_sasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shsax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shsax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shsub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shsub16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__ssax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_ssax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__ssub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_ssub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhsax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhsax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhsub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhsub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqsax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqsax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqsub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqsub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__usax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_usax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__usub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_usub16(__a, __b);
}
#endif
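
/*
 * Example (illustrative): __sasx swaps the halves of the second operand,
 * then adds in the high lane and subtracts in the low lane.
 *
 *   __sasx(0x00040003, 0x00020001);
 *   // high: 4 + 1 == 5, low: 3 - 2 == 1  ->  0x00050001
 */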

/* 9.5.10 Parallel 16-bit multiplications */
/* "Dual" multiplies: both halfword products are formed, then added (smuad,
   smlad) or subtracted (smusd, smlsd). The x suffix exchanges the halfwords
   of the second operand first; the l forms accumulate into 64 bits. */
#if __ARM_FEATURE_SIMD32
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlad(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smladx(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smladx(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlald(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlald(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlaldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlaldx(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlsd(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlsd(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlsdx(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlsdx(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlsld(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlsld(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlsldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlsldx(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smuad(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smuad(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smuadx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smuadx(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smusd(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smusd(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smusdx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smusdx(__a, __b);
}
#endif
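
/*
 * Example (illustrative): dual multiply, then add (smuad) or subtract
 * (smusd) the two halfword products.
 *
 *   __smuad(0x00030002, 0x00050004);  // == 2*4 + 3*5 == 23
 *   __smusd(0x00030002, 0x00050004);  // == 2*4 - 3*5 == -7
 */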

/* 9.7 CRC32 intrinsics */
#if __ARM_FEATURE_CRC32
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32b(uint32_t __a, uint8_t __b) {
  return __builtin_arm_crc32b(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32h(uint32_t __a, uint16_t __b) {
  return __builtin_arm_crc32h(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32w(uint32_t __a, uint32_t __b) {
  return __builtin_arm_crc32w(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32d(uint32_t __a, uint64_t __b) {
  return __builtin_arm_crc32d(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32cb(uint32_t __a, uint8_t __b) {
  return __builtin_arm_crc32cb(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32ch(uint32_t __a, uint16_t __b) {
  return __builtin_arm_crc32ch(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32cw(uint32_t __a, uint32_t __b) {
  return __builtin_arm_crc32cw(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32cd(uint32_t __a, uint64_t __b) {
  return __builtin_arm_crc32cd(__a, __b);
}
#endif
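
/*
 * Example (illustrative): the first argument is the running CRC, the second
 * the new data. A common convention (matching zlib's CRC-32) is to start
 * from 0xFFFFFFFF and invert at the end; this sketch assumes that use, and
 * 'buf'/'len' are hypothetical.
 *
 *   uint32_t crc = 0xFFFFFFFFu;
 *   for (size_t i = 0; i != len; ++i)
 *     crc = __crc32b(crc, buf[i]);
 *   crc = ~crc;
 */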

/* 10.1 Special register intrinsics */
/* The sysreg argument must be a string constant naming the register: a
   coprocessor-based form on AArch32, or a register name on AArch64. */
#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
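
/*
 * Example (illustrative, AArch64): read the virtual counter. The register
 * name here is an assumption about the target; any readable system register
 * string accepted by the compiler works the same way.
 *
 *   uint64_t ticks = __arm_rsr64("CNTVCT_EL0");
 */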

#if defined(__cplusplus)
}
#endif

#endif /* __ARM_ACLE_H */