| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512fp16 --show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86 |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16 --show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64 |
| |
| |
| declare <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half>, <32 x half>, <32 x half>, i32) |
| |
| ; Unmasked FNMADD: negating the second multiplicand of llvm.fma selects |
| ; vfnmadd213ph. The negation is written with the canonical fneg instruction |
| ; (consistent with test_int_x86_avx512_mask3_vfmsubadd_ph_512 below) instead |
| ; of the legacy fsub-from--0.0 idiom; codegen is identical. |
| define <32 x half> @test_x86_vfnmadd_ph_z(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { |
| ; CHECK-LABEL: test_x86_vfnmadd_ph_z: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vfnmadd213ph %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x48,0xac,0xc2] |
| ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] |
| %1 = fneg <32 x half> %a1 |
| %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %1, <32 x half> %a2) |
| ret <32 x half> %2 |
| } |
| |
| ; Masked FNMADD: the select merges into %a0 (the first source), so the |
| ; 132 form is chosen with %zmm0 as destination. Negation uses canonical |
| ; fneg rather than the legacy fsub-from--0.0 idiom; codegen is identical. |
| define <32 x half> @test_mask_vfnmadd_ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 %mask) { |
| ; X86-LABEL: test_mask_vfnmadd_ph: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfnmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9c,0xc1] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_mask_vfnmadd_ph: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfnmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9c,0xc1] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %1 = fneg <32 x half> %a1 |
| %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %1, <32 x half> %a2) |
| %3 = bitcast i32 %mask to <32 x i1> |
| %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 |
| ret <32 x half> %4 |
| } |
| |
| ; Unmasked FNMSUB: negating both the second multiplicand and the addend of |
| ; llvm.fma selects vfnmsub213ph. Negations use canonical fneg rather than |
| ; the legacy fsub-from--0.0 idiom; codegen is identical. |
| define <32 x half> @test_x86_vfnmsubph_z(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { |
| ; CHECK-LABEL: test_x86_vfnmsubph_z: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vfnmsub213ph %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x48,0xae,0xc2] |
| ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] |
| %1 = fneg <32 x half> %a1 |
| %2 = fneg <32 x half> %a2 |
| %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %1, <32 x half> %2) |
| ret <32 x half> %3 |
| } |
| |
| ; Masked FNMSUB: the select merges into %a0, so the 132 form is chosen. |
| ; Negations use canonical fneg rather than the legacy fsub-from--0.0 idiom; |
| ; codegen is identical. |
| define <32 x half> @test_mask_vfnmsub_ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 %mask) { |
| ; X86-LABEL: test_mask_vfnmsub_ph: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfnmsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9e,0xc1] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_mask_vfnmsub_ph: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfnmsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9e,0xc1] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %1 = fneg <32 x half> %a1 |
| %2 = fneg <32 x half> %a2 |
| %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %1, <32 x half> %2) |
| %4 = bitcast i32 %mask to <32 x i1> |
| %5 = select <32 x i1> %4, <32 x half> %3, <32 x half> %a0 |
| ret <32 x half> %5 |
| } |
| |
| ; Unmasked vfmaddsub intrinsic with rounding argument 4: no embedded |
| ; rounding suffix is emitted and the plain vfmaddsub213ph form is selected. |
| define <32 x half> @test_x86_vfmaddsubph_z(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { |
| ; CHECK-LABEL: test_x86_vfmaddsubph_z: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vfmaddsub213ph %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x48,0xa6,0xc2] |
| ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 4) #2 |
| ret <32 x half> %res |
| } |
| |
| ; Masked vfmaddsub: the select merges into %a (the first source), so the |
| ; 132 form is chosen with %zmm0 as merge destination under {%k1}. |
| define <32 x half> @test_mask_fmaddsub_ph(<32 x half> %a, <32 x half> %b, <32 x half> %c, i32 %mask) { |
| ; X86-LABEL: test_mask_fmaddsub_ph: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfmaddsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x96,0xc1] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_mask_fmaddsub_ph: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfmaddsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x96,0xc1] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a, <32 x half> %b, <32 x half> %c, i32 4) |
| %bc = bitcast i32 %mask to <32 x i1> |
| %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %a |
| ret <32 x half> %sel |
| } |
| |
| declare <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half>, <32 x half>, <32 x half>, i32) nounwind readnone |
| |
| ; Masked vfmaddsub (mask variant): passthru is %x0, so the 132 form merges |
| ; into %zmm0 under {%k1}. |
| define <32 x half>@test_int_x86_avx512_mask_vfmaddsub_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ |
| ; X86-LABEL: test_int_x86_avx512_mask_vfmaddsub_ph_512: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfmaddsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x96,0xc1] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_int_x86_avx512_mask_vfmaddsub_ph_512: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfmaddsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x96,0xc1] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 4) |
| %bc = bitcast i32 %x3 to <32 x i1> |
| %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %x0 |
| ret <32 x half> %sel |
| } |
| |
| ; Masked vfmaddsub (mask3 variant): passthru is %x2 (the addend), so the |
| ; 231 form merges into %zmm2, followed by a move to the return register. |
| define <32 x half>@test_int_x86_avx512_mask3_vfmaddsub_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ |
| ; X86-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ph_512: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfmaddsub231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xb6,0xd1] |
| ; X86-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ph_512: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfmaddsub231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xb6,0xd1] |
| ; X64-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 4) |
| %bc = bitcast i32 %x3 to <32 x i1> |
| %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %x2 |
| ret <32 x half> %sel |
| } |
| |
| ; Zero-masked vfmaddsub (maskz variant): passthru is zeroinitializer, so |
| ; the {z} zeroing form of vfmaddsub213ph is selected. |
| define <32 x half>@test_int_x86_avx512_maskz_vfmaddsub_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ |
| ; X86-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ph_512: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfmaddsub213ph %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0xc9,0xa6,0xc2] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ph_512: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfmaddsub213ph %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0xc9,0xa6,0xc2] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 4) |
| %bc = bitcast i32 %x3 to <32 x i1> |
| %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> zeroinitializer |
| ret <32 x half> %sel |
| } |
| |
| ; fneg of the addend turns vfmaddsub into vfmsubadd; with %x2 as passthru |
| ; the 231 form merges into %zmm2 and is then moved to the return register. |
| define <32 x half>@test_int_x86_avx512_mask3_vfmsubadd_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ |
| ; X86-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ph_512: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfmsubadd231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xb7,0xd1] |
| ; X86-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ph_512: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfmsubadd231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xb7,0xd1] |
| ; X64-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %neg = fneg <32 x half> %x2 |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %x0, <32 x half> %x1, <32 x half> %neg, i32 4) |
| %bc = bitcast i32 %x3 to <32 x i1> |
| %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %x2 |
| ret <32 x half> %sel |
| } |
| |
| ; Masked vfmadd with rounding argument 8: emits the {rn-sae} embedded |
| ; rounding form; mask merges into %x0 so the 132 form is selected. |
| define <32 x half> @test_mask_round_vfmadd512_ph_rrb_rne(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 %mask) { |
| ; X86-LABEL: test_mask_round_vfmadd512_ph_rrb_rne: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfmadd132ph {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x19,0x98,0xc1] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_mask_round_vfmadd512_ph_rrb_rne: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfmadd132ph {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x19,0x98,0xc1] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 8) nounwind |
| %bc = bitcast i32 %mask to <32 x i1> |
| %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %a0 |
| ret <32 x half> %sel |
| } |
| |
| ; Masked vfmadd with rounding argument 9: emits the {rd-sae} embedded |
| ; rounding form. |
| define <32 x half> @test_mask_round_vfmadd512_ph_rrb_rtn(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 %mask) { |
| ; X86-LABEL: test_mask_round_vfmadd512_ph_rrb_rtn: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfmadd132ph {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x39,0x98,0xc1] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_mask_round_vfmadd512_ph_rrb_rtn: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfmadd132ph {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x39,0x98,0xc1] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 9) nounwind |
| %bc = bitcast i32 %mask to <32 x i1> |
| %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %a0 |
| ret <32 x half> %sel |
| } |
| |
| ; Masked vfmadd with rounding argument 10: emits the {ru-sae} embedded |
| ; rounding form. |
| define <32 x half> @test_mask_round_vfmadd512_ph_rrb_rtp(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 %mask) { |
| ; X86-LABEL: test_mask_round_vfmadd512_ph_rrb_rtp: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfmadd132ph {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x59,0x98,0xc1] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_mask_round_vfmadd512_ph_rrb_rtp: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfmadd132ph {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x59,0x98,0xc1] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 10) nounwind |
| %bc = bitcast i32 %mask to <32 x i1> |
| %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %a0 |
| ret <32 x half> %sel |
| } |
| |
| ; Masked vfmadd with rounding argument 11: emits the {rz-sae} embedded |
| ; rounding form. |
| define <32 x half> @test_mask_round_vfmadd512_ph_rrb_rtz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 %mask) { |
| ; X86-LABEL: test_mask_round_vfmadd512_ph_rrb_rtz: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfmadd132ph {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x79,0x98,0xc1] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_mask_round_vfmadd512_ph_rrb_rtz: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfmadd132ph {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x79,0x98,0xc1] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 11) nounwind |
| %bc = bitcast i32 %mask to <32 x i1> |
| %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %a0 |
| ret <32 x half> %sel |
| } |
| |
| ; Masked vfmadd with rounding argument 4: no embedded rounding suffix is |
| ; emitted; the plain masked vfmadd132ph is selected. |
| define <32 x half> @test_mask_round_vfmadd512_ph_rrb_current(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 %mask) { |
| ; X86-LABEL: test_mask_round_vfmadd512_ph_rrb_current: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x98,0xc1] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_mask_round_vfmadd512_ph_rrb_current: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x98,0xc1] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 4) nounwind |
| %bc = bitcast i32 %mask to <32 x i1> |
| %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %a0 |
| ret <32 x half> %sel |
| } |
| |
| ; Unmasked vfmadd with rounding argument 8: emits {rn-sae} on the 213 form. |
| define <32 x half> @test_mask_round_vfmadd512_ph_rrbz_rne(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { |
| ; CHECK-LABEL: test_mask_round_vfmadd512_ph_rrbz_rne: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vfmadd213ph {rn-sae}, %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x18,0xa8,0xc2] |
| ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 8) nounwind |
| ret <32 x half> %res |
| } |
| |
| ; Unmasked vfmadd with rounding argument 9: emits {rd-sae} on the 213 form. |
| define <32 x half> @test_mask_round_vfmadd512_ph_rrbz_rtn(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { |
| ; CHECK-LABEL: test_mask_round_vfmadd512_ph_rrbz_rtn: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vfmadd213ph {rd-sae}, %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x38,0xa8,0xc2] |
| ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 9) nounwind |
| ret <32 x half> %res |
| } |
| |
| ; Unmasked vfmadd with rounding argument 10: emits {ru-sae} on the 213 form. |
| define <32 x half> @test_mask_round_vfmadd512_ph_rrbz_rtp(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { |
| ; CHECK-LABEL: test_mask_round_vfmadd512_ph_rrbz_rtp: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vfmadd213ph {ru-sae}, %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x58,0xa8,0xc2] |
| ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 10) nounwind |
| ret <32 x half> %res |
| } |
| |
| ; Unmasked vfmadd with rounding argument 11: emits {rz-sae} on the 213 form. |
| define <32 x half> @test_mask_round_vfmadd512_ph_rrbz_rtz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { |
| ; CHECK-LABEL: test_mask_round_vfmadd512_ph_rrbz_rtz: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vfmadd213ph {rz-sae}, %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x78,0xa8,0xc2] |
| ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 11) nounwind |
| ret <32 x half> %res |
| } |
| |
| ; Unmasked vfmadd with rounding argument 4: no embedded rounding suffix; |
| ; the plain vfmadd213ph is selected. |
| define <32 x half> @test_mask_round_vfmadd512_ph_rrbz_current(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { |
| ; CHECK-LABEL: test_mask_round_vfmadd512_ph_rrbz_current: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vfmadd213ph %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x48,0xa8,0xc2] |
| ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 4) nounwind |
| ret <32 x half> %res |
| } |
| |
| ; Masked FMSUB (mask3 variant): negating the addend selects vfmsub; with |
| ; %x2 as passthru the 231 form merges into %zmm2. Negation uses canonical |
| ; fneg rather than the legacy fsub-from--0.0 idiom; codegen is identical. |
| define <32 x half>@test_int_x86_avx512_mask3_vfmsub_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ |
| ; X86-LABEL: test_int_x86_avx512_mask3_vfmsub_ph_512: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfmsub231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xba,0xd1] |
| ; X86-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_int_x86_avx512_mask3_vfmsub_ph_512: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfmsub231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xba,0xd1] |
| ; X64-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %1 = fneg <32 x half> %x2 |
| %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %x0, <32 x half> %x1, <32 x half> %1) |
| %3 = bitcast i32 %x3 to <32 x i1> |
| %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %x2 |
| ret <32 x half> %4 |
| } |
| |
| ; Masked vfmadd (mask variant): passthru is %x0, so the 132 form merges |
| ; into %zmm0 under {%k1}. |
| define <32 x half>@test_int_x86_avx512_mask_vfmadd_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ |
| ; X86-LABEL: test_int_x86_avx512_mask_vfmadd_ph_512: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x98,0xc1] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_int_x86_avx512_mask_vfmadd_ph_512: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x98,0xc1] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 4) |
| %bc = bitcast i32 %x3 to <32 x i1> |
| %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %x0 |
| ret <32 x half> %sel |
| } |
| |
| ; Masked fma (mask3 variant): passthru is %x2 (the addend), so the 231 |
| ; form merges into %zmm2, followed by a move to the return register. |
| define <32 x half>@test_int_x86_avx512_mask3_vfmadd_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ |
| ; X86-LABEL: test_int_x86_avx512_mask3_vfmadd_ph_512: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfmadd231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xb8,0xd1] |
| ; X86-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_ph_512: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfmadd231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xb8,0xd1] |
| ; X64-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %1 = call <32 x half> @llvm.fma.v32f16(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2) |
| %2 = bitcast i32 %x3 to <32 x i1> |
| %3 = select <32 x i1> %2, <32 x half> %1, <32 x half> %x2 |
| ret <32 x half> %3 |
| } |
| |
| ; Zero-masked fma (maskz variant): passthru is zeroinitializer, so the |
| ; {z} zeroing form of vfmadd213ph is selected. |
| define <32 x half> @test_int_x86_avx512_maskz_vfmadd_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3) { |
| ; X86-LABEL: test_int_x86_avx512_maskz_vfmadd_ph_512: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfmadd213ph %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0xc9,0xa8,0xc2] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_ph_512: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfmadd213ph %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0xc9,0xa8,0xc2] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %1 = call <32 x half> @llvm.fma.v32f16(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2) |
| %2 = bitcast i32 %x3 to <32 x i1> |
| %3 = select <32 x i1> %2, <32 x half> %1, <32 x half> zeroinitializer |
| ret <32 x half> %3 |
| } |
| |
| ; Masked FNMSUB (mask variant): passthru is %x0, so the 132 form is |
| ; selected. Negations use canonical fneg rather than the legacy |
| ; fsub-from--0.0 idiom; codegen is identical. |
| define <32 x half>@test_int_x86_avx512_mask_vfnmsub_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ |
| ; X86-LABEL: test_int_x86_avx512_mask_vfnmsub_ph_512: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfnmsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9e,0xc1] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_int_x86_avx512_mask_vfnmsub_ph_512: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfnmsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9e,0xc1] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %1 = fneg <32 x half> %x1 |
| %2 = fneg <32 x half> %x2 |
| %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %x0, <32 x half> %1, <32 x half> %2) |
| %4 = bitcast i32 %x3 to <32 x i1> |
| %5 = select <32 x i1> %4, <32 x half> %3, <32 x half> %x0 |
| ret <32 x half> %5 |
| } |
| |
| ; Masked FNMSUB (mask3 variant): first multiplicand and addend are negated; |
| ; with %x2 as passthru the 231 form merges into %zmm2. Negations use |
| ; canonical fneg rather than the legacy fsub-from--0.0 idiom; codegen is |
| ; identical. |
| define <32 x half>@test_int_x86_avx512_mask3_vfnmsub_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ |
| ; X86-LABEL: test_int_x86_avx512_mask3_vfnmsub_ph_512: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfnmsub231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xbe,0xd1] |
| ; X86-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_int_x86_avx512_mask3_vfnmsub_ph_512: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfnmsub231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xbe,0xd1] |
| ; X64-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %1 = fneg <32 x half> %x0 |
| %2 = fneg <32 x half> %x2 |
| %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %1, <32 x half> %x1, <32 x half> %2) |
| %4 = bitcast i32 %x3 to <32 x i1> |
| %5 = select <32 x i1> %4, <32 x half> %3, <32 x half> %x2 |
| ret <32 x half> %5 |
| } |
| |
| ; Masked FNMADD (mask variant): passthru is %x0, so the 132 form is |
| ; selected. Negation uses canonical fneg rather than the legacy |
| ; fsub-from--0.0 idiom; codegen is identical. |
| define <32 x half>@test_int_x86_avx512_mask_vfnmadd_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ |
| ; X86-LABEL: test_int_x86_avx512_mask_vfnmadd_ph_512: |
| ; X86: # %bb.0: |
| ; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] |
| ; X86-NEXT: vfnmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9c,0xc1] |
| ; X86-NEXT: retl # encoding: [0xc3] |
| ; |
| ; X64-LABEL: test_int_x86_avx512_mask_vfnmadd_ph_512: |
| ; X64: # %bb.0: |
| ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] |
| ; X64-NEXT: vfnmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9c,0xc1] |
| ; X64-NEXT: retq # encoding: [0xc3] |
| %1 = fneg <32 x half> %x1 |
| %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %x0, <32 x half> %1, <32 x half> %x2) |
| %3 = bitcast i32 %x3 to <32 x i1> |
| %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %x0 |
| ret <32 x half> %4 |
| } |
| |
| ; Unmasked FNMADD with the FIRST multiplicand negated — still commutes to |
| ; vfnmadd213ph. Negation uses canonical fneg rather than the legacy |
| ; fsub-from--0.0 idiom; codegen is identical. |
| define <32 x half> @test_x86_fma_vfnmadd_ph_512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) #0 { |
| ; CHECK-LABEL: test_x86_fma_vfnmadd_ph_512: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vfnmadd213ph %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x48,0xac,0xc2] |
| ; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] |
| %1 = fneg <32 x half> %a0 |
| %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %1, <32 x half> %a1, <32 x half> %a2) |
| ret <32 x half> %2 |
| } |
| |
; Unmasked VFNMSUB, 512-bit: BOTH the first multiplicand and the addend are
; negations of %a0, i.e. fma(-a0, a1, -a0) = -(a0*a1) - a0, so vfnmsub213ph is
; emitted with %zmm0 appearing as both the multiplicand and the subtrahend
; operand (see the %zmm0, %zmm1, %zmm0 operands in the check line).
define <32 x half> @test_x86_fma_vfnmsub_ph_512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfnmsub_ph_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vfnmsub213ph %zmm0, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x48,0xae,0xc0]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  ; fneg of %a0 (multiplicand); %a2 is intentionally unused by this test body.
  %1 = fsub <32 x half> <half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00>, %a0
  ; Second fneg of %a0 (addend), giving the -(a*b) - a shape.
  %2 = fsub <32 x half> <half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00>, %a0
  %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %1, <32 x half> %a1, <32 x half> %2)
  ret <32 x half> %3
}
| |
; Masked scalar FMADD (mask3 form) with a folded memory operand: the half
; loaded from %ptr_b becomes the (%eax)/(%rdi) memory operand of vfmadd231sh.
; The select's false value is the addend lane (%3, from %x1), so the result
; merges into %x1 — matching the 231 form that accumulates into %xmm1.
define <8 x half>@test_int_x86_avx512_mask3_vfmadd_sh(<8 x half> %x0, <8 x half> %x1, half *%ptr_b, i8 %x3, i32 %x4) {
; X86-LABEL: test_int_x86_avx512_mask3_vfmadd_sh:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x08]
; X86-NEXT:    vfmadd231sh (%eax), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xb9,0x08]
; X86-NEXT:    vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; X86-NEXT:    retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_sh:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
; X64-NEXT:    vfmadd231sh (%rdi), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xb9,0x0f]
; X64-NEXT:    vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; X64-NEXT:    retq # encoding: [0xc3]
  ; Scalar load broadcast into lane 0 of an otherwise-undef vector.
  %q = load half, half* %ptr_b
  %vecinit.i = insertelement <8 x half> undef, half %q, i32 0
  %1 = extractelement <8 x half> %x0, i64 0
  %2 = extractelement <8 x half> %vecinit.i, i64 0
  %3 = extractelement <8 x half> %x1, i64 0
  %4 = call half @llvm.fma.f16(half %1, half %2, half %3)
  ; Only mask bit 0 matters for a scalar op.
  %5 = bitcast i8 %x3 to <8 x i1>
  %6 = extractelement <8 x i1> %5, i64 0
  ; Merge masking against the addend lane (%x1[0]).
  %7 = select i1 %6, half %4, half %3
  %8 = insertelement <8 x half> %x1, half %7, i64 0
  ret <8 x half> %8
}
| |
; Zero-masked scalar FMADD: the select's false value is 0.0, so the {z}
; (zeroing) encoding of vfmadd213sh is expected.
; NOTE(review): %res2 (the fadd of %8 with the rounding-intrinsic result %16)
; is computed but %8 is returned, so %9-%16 and %res2 are dead and DCE'd —
; which is why the checks show only ONE vfmadd213sh. This looks like a
; copy-paste leftover, but the autogenerated assertions match the IR as-is;
; changing the return would require regenerating the CHECK lines.
define <8 x half>@test_int_x86_avx512_maskz_vfmadd_sh(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, i8 %x3, i32 %x4 ){
; X86-LABEL: test_int_x86_avx512_maskz_vfmadd_sh:
; X86:       # %bb.0:
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmadd213sh %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0x89,0xa9,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_sh:
; X64:       # %bb.0:
; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT:    vfmadd213sh %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0x89,0xa9,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
  %1 = extractelement <8 x half> %x0, i64 0
  %2 = extractelement <8 x half> %x1, i64 0
  %3 = extractelement <8 x half> %x2, i64 0
  %4 = call half @llvm.fma.f16(half %1, half %2, half %3)
  %5 = bitcast i8 %x3 to <8 x i1>
  %6 = extractelement <8 x i1> %5, i64 0
  ; Zero masking: a clear mask bit yields 0.0 rather than a passthru lane.
  %7 = select i1 %6, half %4, half 0.000000e+00
  %8 = insertelement <8 x half> %x0, half %7, i64 0
  ; Dead from here down (see NOTE above): result feeds only the unused %res2.
  %9 = extractelement <8 x half> %x0, i64 0
  %10 = extractelement <8 x half> %x1, i64 0
  %11 = extractelement <8 x half> %x2, i64 0
  %12 = call half @llvm.x86.avx512fp16.vfmadd.f16(half %9, half %10, half %11, i32 3)
  %13 = bitcast i8 %x3 to <8 x i1>
  %14 = extractelement <8 x i1> %13, i64 0
  %15 = select i1 %14, half %12, half 0.000000e+00
  %16 = insertelement <8 x half> %x0, half %15, i64 0
  %res2 = fadd <8 x half> %8, %16
  ret <8 x half> %8
}
| |
; Scalar FMADD with both operands built from memory loads: computes
; a*b + a on the low lanes, merge-masks the result against the original a
; (vmovsh with {%k1}), and stores the low half back to *%a. The upper seven
; lanes of each vector are explicitly zeroed so only lane 0 carries data.
define void @fmadd_sh_mask_memfold(half* %a, half* %b, i8 %c) {
; X86-LABEL: fmadd_sh_mask_memfold:
; X86:       # %bb.0:
; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x0c]
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X86-NEXT:    vmovsh (%ecx), %xmm0 # encoding: [0x62,0xf5,0x7e,0x08,0x10,0x01]
; X86-NEXT:    vmovsh (%eax), %xmm1 # encoding: [0x62,0xf5,0x7e,0x08,0x10,0x08]
; X86-NEXT:    vfmadd213sh %xmm0, %xmm0, %xmm1 # encoding: [0x62,0xf6,0x7d,0x08,0xa9,0xc8]
; X86-NEXT:    vmovsh %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7e,0x09,0x10,0xc1]
; X86-NEXT:    vmovsh %xmm0, (%ecx) # encoding: [0x62,0xf5,0x7e,0x08,0x11,0x01]
; X86-NEXT:    retl # encoding: [0xc3]
;
; X64-LABEL: fmadd_sh_mask_memfold:
; X64:       # %bb.0:
; X64-NEXT:    vmovsh (%rdi), %xmm0 # encoding: [0x62,0xf5,0x7e,0x08,0x10,0x07]
; X64-NEXT:    vmovsh (%rsi), %xmm1 # encoding: [0x62,0xf5,0x7e,0x08,0x10,0x0e]
; X64-NEXT:    vfmadd213sh %xmm0, %xmm0, %xmm1 # encoding: [0x62,0xf6,0x7d,0x08,0xa9,0xc8]
; X64-NEXT:    kmovd %edx, %k1 # encoding: [0xc5,0xfb,0x92,0xca]
; X64-NEXT:    vmovsh %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7e,0x09,0x10,0xc1]
; X64-NEXT:    vmovsh %xmm0, (%rdi) # encoding: [0x62,0xf5,0x7e,0x08,0x11,0x07]
; X64-NEXT:    retq # encoding: [0xc3]
  ; Build <a.val, 0, 0, 0, 0, 0, 0, 0> from the scalar load of *%a.
  %a.val = load half, half* %a
  %av0 = insertelement <8 x half> undef, half %a.val, i32 0
  %av1 = insertelement <8 x half> %av0, half 0.000000e+00, i32 1
  %av2 = insertelement <8 x half> %av1, half 0.000000e+00, i32 2
  %av3 = insertelement <8 x half> %av2, half 0.000000e+00, i32 3
  %av4 = insertelement <8 x half> %av3, half 0.000000e+00, i32 4
  %av5 = insertelement <8 x half> %av4, half 0.000000e+00, i32 5
  %av6 = insertelement <8 x half> %av5, half 0.000000e+00, i32 6
  %av = insertelement <8 x half> %av6, half 0.000000e+00, i32 7

  ; Build <b.val, 0, 0, 0, 0, 0, 0, 0> from the scalar load of *%b.
  %b.val = load half, half* %b
  %bv0 = insertelement <8 x half> undef, half %b.val, i32 0
  %bv1 = insertelement <8 x half> %bv0, half 0.000000e+00, i32 1
  %bv2 = insertelement <8 x half> %bv1, half 0.000000e+00, i32 2
  %bv3 = insertelement <8 x half> %bv2, half 0.000000e+00, i32 3
  %bv4 = insertelement <8 x half> %bv3, half 0.000000e+00, i32 4
  %bv5 = insertelement <8 x half> %bv4, half 0.000000e+00, i32 5
  %bv6 = insertelement <8 x half> %bv5, half 0.000000e+00, i32 6
  %bv = insertelement <8 x half> %bv6, half 0.000000e+00, i32 7
  ; fma(a, b, a) on lane 0.
  %1 = extractelement <8 x half> %av, i64 0
  %2 = extractelement <8 x half> %bv, i64 0
  %3 = extractelement <8 x half> %av, i64 0
  %4 = call half @llvm.fma.f16(half %1, half %2, half %3)
  ; Mask bit 0 of %c selects between the fma result and the original a.val.
  %5 = bitcast i8 %c to <8 x i1>
  %6 = extractelement <8 x i1> %5, i64 0
  %7 = select i1 %6, half %4, half %1
  %8 = insertelement <8 x half> %av, half %7, i64 0
  ; Store only the low half back through %a.
  %sr = extractelement <8 x half> %8, i32 0
  store half %sr, half* %a
  ret void
}
| |
| declare half @llvm.fma.f16(half, half, half) |
| declare half @llvm.x86.avx512fp16.vfmadd.f16(half, half, half, i32) |
| |
| declare <32 x half> @llvm.fma.v32f16(<32 x half>, <32 x half>, <32 x half>) |