| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -verify-machineinstrs -mcpu=sapphirerapids -mattr=+false-deps-perm -mtriple=x86_64-unknown-unknown < %s | FileCheck %s --check-prefixes=ENABLE |
| ; RUN: llc -verify-machineinstrs -mcpu=sapphirerapids -mattr=-false-deps-perm -mtriple=x86_64-unknown-unknown < %s | FileCheck %s --check-prefixes=DISABLE |
| |
| ; permq_ri_256: immediate-form 256-bit permute. The inline asm clobbers xmm2-xmm31, so the |
| ; permute destination register holds a stale value; the ENABLE run (+false-deps-perm) inserts a |
| ; dependency-breaking vxorps on the destination before vpermq, the DISABLE run omits it. |
| define <4 x i64> @permq_ri_256(<4 x i64> %a0) { |
| ; ENABLE-LABEL: permq_ri_256: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vpermq {{.*#+}} ymm1 = ymm0[1,2,1,0] |
| ; ENABLE-NEXT: vpaddq %ymm0, %ymm1, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permq_ri_256: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vpermq {{.*#+}} ymm1 = ymm0[1,2,1,0] |
| ; DISABLE-NEXT: vpaddq %ymm0, %ymm1, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 1, i32 2, i32 1, i32 0> |
| %res = add <4 x i64> %2, %a0 |
| ret <4 x i64> %res |
| } |
| |
| ; permq_rr_256: register-register variable permute (permvar.di.256 intrinsic); %idx is spilled |
| ; around the asm and reloaded; only the ENABLE run zeroes the vpermq destination (ymm1) first. |
| define <4 x i64> @permq_rr_256(<4 x i64> %a0, <4 x i64> %idx) { |
| ; ENABLE-LABEL: permq_rr_256: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vpermq %ymm0, %ymm2, %ymm1 |
| ; ENABLE-NEXT: vpaddq %ymm2, %ymm0, %ymm0 |
| ; ENABLE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permq_rr_256: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; DISABLE-NEXT: vpermq %ymm0, %ymm2, %ymm1 |
| ; DISABLE-NEXT: vpaddq %ymm2, %ymm0, %ymm0 |
| ; DISABLE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = call <4 x i64> @llvm.x86.avx512.permvar.di.256(<4 x i64> %a0, <4 x i64> %idx) |
| %t = add <4 x i64> %a0, %idx |
| %res = add <4 x i64> %t, %2 |
| ret <4 x i64> %res |
| } |
| |
| ; permq_rm_256: memory-source variable permute — the load of %a0 is folded into vpermq (%rdi); |
| ; ENABLE run additionally breaks the false dependency on ymm1 with vxorps. |
| define <4 x i64> @permq_rm_256(ptr %p0, <4 x i64> %idx) { |
| ; ENABLE-LABEL: permq_rm_256: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vpermq (%rdi), %ymm0, %ymm1 |
| ; ENABLE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permq_rm_256: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vpermq (%rdi), %ymm0, %ymm1 |
| ; DISABLE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %a0 = load <4 x i64>, ptr %p0, align 64 |
| %2 = call <4 x i64> @llvm.x86.avx512.permvar.di.256(<4 x i64> %a0, <4 x i64> %idx) |
| %res = add <4 x i64> %idx, %2 |
| ret <4 x i64> %res |
| } |
| |
| ; permq_mi_256: load + immediate shufflevector, selected as a vpermpd directly from memory; |
| ; the ENABLE run still zeroes the destination (xmm0) before the memory-source permute. |
| define <4 x i64> @permq_mi_256(ptr %p0) { |
| ; ENABLE-LABEL: permq_mi_256: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vxorps %xmm0, %xmm0, %xmm0 |
| ; ENABLE-NEXT: vpermpd {{.*#+}} ymm0 = mem[3,2,2,0] |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permq_mi_256: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vpermpd {{.*#+}} ymm0 = mem[3,2,2,0] |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %a0 = load <4 x i64>, ptr %p0, align 64 |
| %2 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 2, i32 0> |
| ret <4 x i64> %2 |
| } |
| |
| ; permq_broadcast_256: scalar load broadcast to all lanes, folded as the {1to4} embedded- |
| ; broadcast memory operand of vpermq; ENABLE run zeroes the destination first. |
| define <4 x i64> @permq_broadcast_256(ptr %p0, <4 x i64> %idx) { |
| ; ENABLE-LABEL: permq_broadcast_256: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm0, %xmm0, %xmm0 |
| ; ENABLE-NEXT: vpermq (%rdi){1to4}, %ymm1, %ymm0 |
| ; ENABLE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permq_broadcast_256: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; DISABLE-NEXT: vpermq (%rdi){1to4}, %ymm1, %ymm0 |
| ; DISABLE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %v0 = load i64, ptr %p0, align 4 |
| %t0 = insertelement <4 x i64> undef, i64 %v0, i64 0 |
| %a0 = shufflevector <4 x i64> %t0, <4 x i64> undef, <4 x i32> zeroinitializer |
| %2 = call <4 x i64> @llvm.x86.avx512.permvar.di.256(<4 x i64> %a0, <4 x i64> %idx) |
| %res = add <4 x i64> %2, %idx |
| ret <4 x i64> %res |
| } |
| |
| ; permq_maskz_256: zero-masked permvar intrinsic (mask byte loaded via kmovb); per the checks |
| ; the permute itself is emitted unmasked into ymm2 and the mask lands on the final vpaddq; |
| ; ENABLE run breaks the false dependency on ymm2 with vxorps. |
| define <4 x i64> @permq_maskz_256(<4 x i64> %a0, <4 x i64> %idx, ptr %mask) { |
| ; ENABLE-LABEL: permq_maskz_256: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vpermq %ymm0, %ymm1, %ymm2 |
| ; ENABLE-NEXT: kmovb (%rdi), %k1 |
| ; ENABLE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 |
| ; ENABLE-NEXT: vpaddq %ymm2, %ymm0, %ymm0 {%k1} |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permq_maskz_256: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vpermq %ymm0, %ymm1, %ymm2 |
| ; DISABLE-NEXT: kmovb (%rdi), %k1 |
| ; DISABLE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 |
| ; DISABLE-NEXT: vpaddq %ymm2, %ymm0, %ymm0 {%k1} |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = load i8, ptr %mask |
| %3 = call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> %idx, <4 x i64> zeroinitializer, i8 %2) |
| %t = add <4 x i64> %a0, %idx |
| %res = add <4 x i64> %3, %t |
| ret <4 x i64> %res |
| } |
| |
| declare <4 x i64> @llvm.x86.avx512.permvar.di.256(<4 x i64>, <4 x i64>) |
| declare <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) |
| |
| ; permq_rr_512: 512-bit register-register variable permute; same shape as permq_rr_256 but on |
| ; zmm registers, and the ENABLE run uses vpxor (rather than vxorps) as the dependency breaker. |
| define <8 x i64> @permq_rr_512(<8 x i64> %a0, <8 x i64> %idx) { |
| ; ENABLE-LABEL: permq_rr_512: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; ENABLE-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vpermq %zmm0, %zmm2, %zmm1 |
| ; ENABLE-NEXT: vpaddq %zmm2, %zmm0, %zmm0 |
| ; ENABLE-NEXT: vpaddq %zmm1, %zmm0, %zmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permq_rr_512: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; DISABLE-NEXT: vpermq %zmm0, %zmm2, %zmm1 |
| ; DISABLE-NEXT: vpaddq %zmm2, %zmm0, %zmm0 |
| ; DISABLE-NEXT: vpaddq %zmm1, %zmm0, %zmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <8 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> %a0, <8 x i64> %idx) |
| %t = add <8 x i64> %a0, %idx |
| %res = add <8 x i64> %t, %2 |
| ret <8 x i64> %res |
| } |
| |
| ; permq_rm_512: 512-bit memory-source variable permute, load folded into vpermq (%rdi); |
| ; ENABLE run adds a vpxor dependency breaker on zmm1. |
| define <8 x i64> @permq_rm_512(ptr %p0, <8 x i64> %idx) { |
| ; ENABLE-LABEL: permq_rm_512: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vpermq (%rdi), %zmm0, %zmm1 |
| ; ENABLE-NEXT: vpaddq %zmm1, %zmm0, %zmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permq_rm_512: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vpermq (%rdi), %zmm0, %zmm1 |
| ; DISABLE-NEXT: vpaddq %zmm1, %zmm0, %zmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <8 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %a0 = load <8 x i64>, ptr %p0, align 64 |
| %2 = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> %a0, <8 x i64> %idx) |
| %res = add <8 x i64> %idx, %2 |
| ret <8 x i64> %res |
| } |
| |
| ; permq_broadcast_512: broadcast scalar folded as the {1to8} embedded-broadcast operand of |
| ; vpermq; ENABLE run zeroes the destination (zmm0 via vpxor xmm0) first. |
| define <8 x i64> @permq_broadcast_512(ptr %p0, <8 x i64> %idx) { |
| ; ENABLE-LABEL: permq_broadcast_512: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; ENABLE-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; ENABLE-NEXT: vpermq (%rdi){1to8}, %zmm1, %zmm0 |
| ; ENABLE-NEXT: vpaddq %zmm1, %zmm0, %zmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permq_broadcast_512: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; DISABLE-NEXT: vpermq (%rdi){1to8}, %zmm1, %zmm0 |
| ; DISABLE-NEXT: vpaddq %zmm1, %zmm0, %zmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %v0 = load i64, ptr %p0, align 4 |
| %t0 = insertelement <8 x i64> undef, i64 %v0, i64 0 |
| %a0 = shufflevector <8 x i64> %t0, <8 x i64> undef, <8 x i32> zeroinitializer |
| %2 = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> %a0, <8 x i64> %idx) |
| %res = add <8 x i64> %2, %idx |
| ret <8 x i64> %res |
| } |
| |
| ; permq_maskz_512: 512-bit zero-masked permvar; as in the 256-bit case the permute is emitted |
| ; unmasked (mask applied to the final vpaddq), and ENABLE adds a vpxor on the zmm2 destination. |
| define <8 x i64> @permq_maskz_512(<8 x i64> %a0, <8 x i64> %idx, ptr %mask) { |
| ; ENABLE-LABEL: permq_maskz_512: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vpermq %zmm0, %zmm1, %zmm2 |
| ; ENABLE-NEXT: kmovb (%rdi), %k1 |
| ; ENABLE-NEXT: vpaddq %zmm1, %zmm0, %zmm0 |
| ; ENABLE-NEXT: vpaddq %zmm2, %zmm0, %zmm0 {%k1} |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permq_maskz_512: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vpermq %zmm0, %zmm1, %zmm2 |
| ; DISABLE-NEXT: kmovb (%rdi), %k1 |
| ; DISABLE-NEXT: vpaddq %zmm1, %zmm0, %zmm0 |
| ; DISABLE-NEXT: vpaddq %zmm2, %zmm0, %zmm0 {%k1} |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = load i8, ptr %mask |
| %3 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> %idx, <8 x i64> zeroinitializer, i8 %2) |
| %t = add <8 x i64> %a0, %idx |
| %res = add <8 x i64> %3, %t |
| ret <8 x i64> %res |
| } |
| |
| declare <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64>, <8 x i64>) |
| declare <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64>, <8 x i64>, <8 x i64>, i8) |
| |
| ; permd_rr_256: 32-bit element variable permute (mask.permvar.si.256 with all-ones mask, i.e. |
| ; effectively unmasked); ENABLE run breaks the false dep on the vpermd destination with vxorps. |
| define <8 x i32> @permd_rr_256(<8 x i32> %a0, <8 x i32> %idx) { |
| ; ENABLE-LABEL: permd_rr_256: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vpermd %ymm0, %ymm2, %ymm1 |
| ; ENABLE-NEXT: vpaddd %ymm2, %ymm0, %ymm0 |
| ; ENABLE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permd_rr_256: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload |
| ; DISABLE-NEXT: vpermd %ymm0, %ymm2, %ymm1 |
| ; DISABLE-NEXT: vpaddd %ymm2, %ymm0, %ymm0 |
| ; DISABLE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <8 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> %idx, <8 x i32> undef, i8 -1) |
| %t = add <8 x i32> %a0, %idx |
| %res = add <8 x i32> %t, %2 |
| ret <8 x i32> %res |
| } |
| |
| ; permd_rm_256: memory-source vpermd (load folded); ENABLE run adds vxorps on ymm1 first. |
| define <8 x i32> @permd_rm_256(ptr %p0, <8 x i32> %idx) { |
| ; ENABLE-LABEL: permd_rm_256: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vpermd (%rdi), %ymm0, %ymm1 |
| ; ENABLE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permd_rm_256: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vpermd (%rdi), %ymm0, %ymm1 |
| ; DISABLE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <8 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %a0 = load <8 x i32>, ptr %p0, align 64 |
| %2 = call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> %idx, <8 x i32> undef, i8 -1) |
| %res = add <8 x i32> %idx, %2 |
| ret <8 x i32> %res |
| } |
| |
| ; permd_broadcast_256: i32 broadcast folded as the {1to8} operand of vpermd; ENABLE run zeroes |
| ; the destination first. |
| define <8 x i32> @permd_broadcast_256(ptr %p0, <8 x i32> %idx) { |
| ; ENABLE-LABEL: permd_broadcast_256: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm0, %xmm0, %xmm0 |
| ; ENABLE-NEXT: vpermd (%rdi){1to8}, %ymm1, %ymm0 |
| ; ENABLE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permd_broadcast_256: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; DISABLE-NEXT: vpermd (%rdi){1to8}, %ymm1, %ymm0 |
| ; DISABLE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %v0 = load i32, ptr %p0, align 4 |
| %t0 = insertelement <8 x i32> undef, i32 %v0, i32 0 |
| %a0 = shufflevector <8 x i32> %t0, <8 x i32> undef, <8 x i32> zeroinitializer |
| %2 = call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> %idx, <8 x i32> zeroinitializer, i8 -1) |
| %res = add <8 x i32> %2, %idx |
| ret <8 x i32> %res |
| } |
| |
| ; permd_maskz_256: zero-masked vpermd variant; permute emitted unmasked into ymm2 (mask lands |
| ; on the final vpaddd); ENABLE run adds the vxorps dependency breaker. |
| define <8 x i32> @permd_maskz_256(<8 x i32> %a0, <8 x i32> %idx, ptr %mask) { |
| ; ENABLE-LABEL: permd_maskz_256: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vpermd %ymm0, %ymm1, %ymm2 |
| ; ENABLE-NEXT: kmovb (%rdi), %k1 |
| ; ENABLE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 |
| ; ENABLE-NEXT: vpaddd %ymm2, %ymm0, %ymm0 {%k1} |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permd_maskz_256: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vpermd %ymm0, %ymm1, %ymm2 |
| ; DISABLE-NEXT: kmovb (%rdi), %k1 |
| ; DISABLE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 |
| ; DISABLE-NEXT: vpaddd %ymm2, %ymm0, %ymm0 {%k1} |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = load i8, ptr %mask |
| %3 = call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> %idx, <8 x i32> zeroinitializer, i8 %2) |
| %t = add <8 x i32> %a0, %idx |
| %res = add <8 x i32> %3, %t |
| ret <8 x i32> %res |
| } |
| |
| declare <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32>, <8 x i32>, <8 x i32>, i8) |
| |
| ; permd_rr_512: 512-bit vpermd with all-ones mask (effectively unmasked); ENABLE run uses vpxor |
| ; as the dependency breaker on the zmm1 destination. |
| define <16 x i32> @permd_rr_512(<16 x i32> %a0, <16 x i32> %idx) { |
| ; ENABLE-LABEL: permd_rr_512: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; ENABLE-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vpermd %zmm0, %zmm2, %zmm1 |
| ; ENABLE-NEXT: vpaddd %zmm2, %zmm0, %zmm0 |
| ; ENABLE-NEXT: vpaddd %zmm1, %zmm0, %zmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permd_rr_512: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload |
| ; DISABLE-NEXT: vpermd %zmm0, %zmm2, %zmm1 |
| ; DISABLE-NEXT: vpaddd %zmm2, %zmm0, %zmm0 |
| ; DISABLE-NEXT: vpaddd %zmm1, %zmm0, %zmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <8 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> %idx, <16 x i32> undef, i16 -1) |
| %t = add <16 x i32> %a0, %idx |
| %res = add <16 x i32> %t, %2 |
| ret <16 x i32> %res |
| } |
| |
| ; permd_rm_512: 512-bit memory-source vpermd (load folded); ENABLE run adds vpxor on zmm1. |
| define <16 x i32> @permd_rm_512(ptr %p0, <16 x i32> %idx) { |
| ; ENABLE-LABEL: permd_rm_512: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vpermd (%rdi), %zmm0, %zmm1 |
| ; ENABLE-NEXT: vpaddd %zmm1, %zmm0, %zmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permd_rm_512: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vpermd (%rdi), %zmm0, %zmm1 |
| ; DISABLE-NEXT: vpaddd %zmm1, %zmm0, %zmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <8 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %a0 = load <16 x i32>, ptr %p0, align 64 |
| %2 = call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> %idx, <16 x i32> undef, i16 -1) |
| %res = add <16 x i32> %idx, %2 |
| ret <16 x i32> %res |
| } |
| |
| ; permd_broadcast_512: i32 broadcast folded as the {1to16} operand of vpermd; ENABLE run zeroes |
| ; the zmm0 destination (vpxor xmm0) first. |
| define <16 x i32> @permd_broadcast_512(ptr %p0, <16 x i32> %idx) { |
| ; ENABLE-LABEL: permd_broadcast_512: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; ENABLE-NEXT: vpxor %xmm0, %xmm0, %xmm0 |
| ; ENABLE-NEXT: vpermd (%rdi){1to16}, %zmm1, %zmm0 |
| ; ENABLE-NEXT: vpaddd %zmm1, %zmm0, %zmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permd_broadcast_512: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
| ; DISABLE-NEXT: vpermd (%rdi){1to16}, %zmm1, %zmm0 |
| ; DISABLE-NEXT: vpaddd %zmm1, %zmm0, %zmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %v0 = load i32, ptr %p0, align 4 |
| %t0 = insertelement <16 x i32> undef, i32 %v0, i32 0 |
| %a0 = shufflevector <16 x i32> %t0, <16 x i32> undef, <16 x i32> zeroinitializer |
| %2 = call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> %idx, <16 x i32> undef, i16 -1) |
| %res = add <16 x i32> %2, %idx |
| ret <16 x i32> %res |
| } |
| |
| ; permd_maskz_512: 512-bit zero-masked vpermd; 16-bit mask loaded via kmovw (vs kmovb for the |
| ; 8-lane cases); ENABLE run adds the vpxor dependency breaker on zmm2. |
| define <16 x i32> @permd_maskz_512(<16 x i32> %a0, <16 x i32> %idx, ptr %mask) { |
| ; ENABLE-LABEL: permd_maskz_512: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vpxor %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vpermd %zmm0, %zmm1, %zmm2 |
| ; ENABLE-NEXT: kmovw (%rdi), %k1 |
| ; ENABLE-NEXT: vpaddd %zmm1, %zmm0, %zmm0 |
| ; ENABLE-NEXT: vpaddd %zmm2, %zmm0, %zmm0 {%k1} |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permd_maskz_512: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vpermd %zmm0, %zmm1, %zmm2 |
| ; DISABLE-NEXT: kmovw (%rdi), %k1 |
| ; DISABLE-NEXT: vpaddd %zmm1, %zmm0, %zmm0 |
| ; DISABLE-NEXT: vpaddd %zmm2, %zmm0, %zmm0 {%k1} |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = load i16, ptr %mask |
| %3 = call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> %idx, <16 x i32> zeroinitializer, i16 %2) |
| %t = add <16 x i32> %a0, %idx |
| %res = add <16 x i32> %3, %t |
| ret <16 x i32> %res |
| } |
| |
| declare <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32>, <16 x i32>, <16 x i32>, i16) |
| |
| ; permpd_ri_256: floating-point immediate permute (vpermpd); same check pattern as |
| ; permq_ri_256 — ENABLE run inserts vxorps on the destination, DISABLE run does not. |
| define <4 x double> @permpd_ri_256(<4 x double> %a0) { |
| ; ENABLE-LABEL: permpd_ri_256: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[1,2,1,0] |
| ; ENABLE-NEXT: vaddpd %ymm0, %ymm1, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permpd_ri_256: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[1,2,1,0] |
| ; DISABLE-NEXT: vaddpd %ymm0, %ymm1, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 1, i32 2, i32 1, i32 0> |
| %res = fadd <4 x double> %2, %a0 |
| ret <4 x double> %res |
| } |
| |
| ; permpd_rr_256: variable FP permute (permvar.df.256) whose first operand is the inline-asm |
| ; result %1 itself; %idx is also converted with vcvtqq2pd. Only ENABLE zeroes the vpermpd |
| ; destination (ymm1) with vxorps. |
| define <4 x double> @permpd_rr_256(<4 x double> %a0, <4 x i64> %idx) { |
| ; ENABLE-LABEL: permpd_rr_256: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovapd %ymm0, %ymm2 |
| ; ENABLE-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vpermpd %ymm2, %ymm0, %ymm1 |
| ; ENABLE-NEXT: vcvtqq2pd %ymm0, %ymm0 |
| ; ENABLE-NEXT: vaddpd %ymm0, %ymm2, %ymm0 |
| ; ENABLE-NEXT: vaddpd %ymm0, %ymm1, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permpd_rr_256: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovapd %ymm0, %ymm2 |
| ; DISABLE-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; DISABLE-NEXT: vpermpd %ymm2, %ymm0, %ymm1 |
| ; DISABLE-NEXT: vcvtqq2pd %ymm0, %ymm0 |
| ; DISABLE-NEXT: vaddpd %ymm0, %ymm2, %ymm0 |
| ; DISABLE-NEXT: vaddpd %ymm0, %ymm1, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <4 x double> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = call <4 x double> @llvm.x86.avx512.permvar.df.256(<4 x double> %1, <4 x i64> %idx) |
| %a1 = sitofp <4 x i64> %idx to <4 x double> |
| %t = fadd <4 x double> %1, %a1 |
| %res = fadd <4 x double> %2, %t |
| ret <4 x double> %res |
| } |
| |
| ; permpd_rm_256: memory-source FP variable permute (load folded into vpermpd (%rdi)); ENABLE |
| ; run zeroes the ymm0 destination with vxorps first. |
| define <4 x double> @permpd_rm_256(ptr %p0, <4 x i64> %idx) { |
| ; ENABLE-LABEL: permpd_rm_256: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm0, %xmm0, %xmm0 |
| ; ENABLE-NEXT: vpermpd (%rdi), %ymm1, %ymm0 |
| ; ENABLE-NEXT: vcvtqq2pd %ymm1, %ymm1 |
| ; ENABLE-NEXT: vaddpd %ymm1, %ymm0, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permpd_rm_256: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; DISABLE-NEXT: vpermpd (%rdi), %ymm1, %ymm0 |
| ; DISABLE-NEXT: vcvtqq2pd %ymm1, %ymm1 |
| ; DISABLE-NEXT: vaddpd %ymm1, %ymm0, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %a0 = load <4 x double>, ptr %p0, align 64 |
| %2 = call <4 x double> @llvm.x86.avx512.permvar.df.256(<4 x double> %a0, <4 x i64> %idx) |
| %a1 = sitofp <4 x i64> %idx to <4 x double> |
| %res = fadd <4 x double> %2, %a1 |
| ret <4 x double> %res |
| } |
| |
| ; permpd_mi_256: FP load + immediate shuffle folded to vpermpd from memory; ENABLE run still |
| ; zeroes the destination first. |
| define <4 x double> @permpd_mi_256(ptr %p0) { |
| ; ENABLE-LABEL: permpd_mi_256: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vxorps %xmm0, %xmm0, %xmm0 |
| ; ENABLE-NEXT: vpermpd {{.*#+}} ymm0 = mem[3,2,2,0] |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permpd_mi_256: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vpermpd {{.*#+}} ymm0 = mem[3,2,2,0] |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %a0 = load <4 x double>, ptr %p0, align 64 |
| %2 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 3, i32 2, i32 2, i32 0> |
| ret <4 x double> %2 |
| } |
| |
| ; permpd_broadcast_256: double broadcast folded as the {1to4} operand of vpermpd; ENABLE run |
| ; zeroes the ymm0 destination with vxorps first. |
| define <4 x double> @permpd_broadcast_256(ptr %p0, <4 x i64> %idx) { |
| ; ENABLE-LABEL: permpd_broadcast_256: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm0, %xmm0, %xmm0 |
| ; ENABLE-NEXT: vpermpd (%rdi){1to4}, %ymm1, %ymm0 |
| ; ENABLE-NEXT: vcvtqq2pd %ymm1, %ymm1 |
| ; ENABLE-NEXT: vaddpd %ymm1, %ymm0, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: permpd_broadcast_256: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; DISABLE-NEXT: vpermpd (%rdi){1to4}, %ymm1, %ymm0 |
| ; DISABLE-NEXT: vcvtqq2pd %ymm1, %ymm1 |
| ; DISABLE-NEXT: vaddpd %ymm1, %ymm0, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %v0 = load double, ptr %p0, align 4 |
| %t0 = insertelement <4 x double> undef, double %v0, i64 0 |
| %a0 = shufflevector <4 x double> %t0, <4 x double> undef, <4 x i32> zeroinitializer |
| %2 = call <4 x double> @llvm.x86.avx512.permvar.df.256(<4 x double> %a0, <4 x i64> %idx) |
| %a1 = sitofp <4 x i64> %idx to <4 x double> |
| %res = fadd <4 x double> %2, %a1 |
| ret <4 x double> %res |
| } |
| |
; Zero-masked VPERMPD ({%k1} {z}). Even though masked-zero lanes are
; architecturally zeroed, ENABLE still emits a vxorps on the destination
; before the permute; DISABLE relies on the {z} semantics alone.
define <4 x double> @permpd_maskz_256(<4 x double> %a0, <4 x i64> %idx, ptr %mask) {
; ENABLE-LABEL: permpd_maskz_256:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    kmovb (%rdi), %k1
; ENABLE-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; ENABLE-NEXT:    vpermpd %ymm0, %ymm1, %ymm2 {%k1} {z}
; ENABLE-NEXT:    vcvtqq2pd %ymm1, %ymm1
; ENABLE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; ENABLE-NEXT:    vaddpd %ymm0, %ymm2, %ymm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: permpd_maskz_256:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    kmovb (%rdi), %k1
; DISABLE-NEXT:    vpermpd %ymm0, %ymm1, %ymm2 {%k1} {z}
; DISABLE-NEXT:    vcvtqq2pd %ymm1, %ymm1
; DISABLE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; DISABLE-NEXT:    vaddpd %ymm0, %ymm2, %ymm0
; DISABLE-NEXT:    retq
  ; Clobber list starts at xmm3: xmm0/xmm1 carry %a0/%idx and xmm2 is the permute destination.
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %2 = load i8, ptr %mask
  %3 = call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> %idx, <4 x double> zeroinitializer, i8 %2)
  ; Keep %a0 and %idx live past the permute.
  %a1 = sitofp <4 x i64> %idx to <4 x double>
  %t = fadd <4 x double> %a0, %a1
  %res = fadd <4 x double> %3, %t
  ret <4 x double> %res
}
| |
| declare <4 x double> @llvm.x86.avx512.permvar.df.256(<4 x double>, <4 x i64>) |
| declare <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double>, <4 x i64>, <4 x double>, i8) |
| |
; Register-register 512-bit VPERMPD. The permute source is the inline-asm
; result, so the destination is freshly allocated; ENABLE zeroes it with
; vpxor before the permute, DISABLE does not.
define <8 x double> @permpd_rr_512(<8 x double> %a0, <8 x i64> %idx) {
; ENABLE-LABEL: permpd_rr_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovapd %zmm0, %zmm2
; ENABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; ENABLE-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vpermpd %zmm2, %zmm0, %zmm1
; ENABLE-NEXT:    vcvtqq2pd %zmm0, %zmm0
; ENABLE-NEXT:    vaddpd %zmm0, %zmm2, %zmm0
; ENABLE-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: permpd_rr_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovapd %zmm0, %zmm2
; DISABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; DISABLE-NEXT:    vpermpd %zmm2, %zmm0, %zmm1
; DISABLE-NEXT:    vcvtqq2pd %zmm0, %zmm0
; DISABLE-NEXT:    vaddpd %zmm0, %zmm2, %zmm0
; DISABLE-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
; DISABLE-NEXT:    retq
  ; The asm both produces the permute source (=x) and clobbers xmm1-xmm31.
  %1 = tail call <8 x double> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %2 = call <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double> %1, <8 x i64> %idx)
  ; Keep %1 and %idx live past the permute.
  %a1 = sitofp <8 x i64> %idx to <8 x double>
  %t = fadd <8 x double> %1, %a1
  %res = fadd <8 x double> %2, %t
  ret <8 x double> %res
}
| |
; Memory-source 512-bit VPERMPD with a variable index. ENABLE inserts a
; dependency-breaking vpxor on the destination before the folded-load
; permute; DISABLE does not.
define <8 x double> @permpd_rm_512(ptr %p0, <8 x i64> %idx) {
; ENABLE-LABEL: permpd_rm_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; ENABLE-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vpermpd (%rdi), %zmm1, %zmm0
; ENABLE-NEXT:    vcvtqq2pd %zmm1, %zmm1
; ENABLE-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: permpd_rm_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; DISABLE-NEXT:    vpermpd (%rdi), %zmm1, %zmm0
; DISABLE-NEXT:    vcvtqq2pd %zmm1, %zmm1
; DISABLE-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; DISABLE-NEXT:    retq
  ; Clobber xmm1-xmm31 to constrain register allocation around the permute.
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  ; Aligned full-width load folds into the permute's memory operand.
  %a0 = load <8 x double>, ptr %p0, align 64
  %2 = call <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double> %a0, <8 x i64> %idx)
  ; Keep %idx live past the permute.
  %a1 = sitofp <8 x i64> %idx to <8 x double>
  %res = fadd <8 x double> %2, %a1
  ret <8 x double> %res
}
| |
; Broadcast-load 512-bit VPERMPD ((%rdi){1to8}). ENABLE zeroes the
; destination with vpxor before the permute; DISABLE does not.
define <8 x double> @permpd_broadcast_512(ptr %p0, <8 x i64> %idx) {
; ENABLE-LABEL: permpd_broadcast_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; ENABLE-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vpermpd (%rdi){1to8}, %zmm1, %zmm0
; ENABLE-NEXT:    vcvtqq2pd %zmm1, %zmm1
; ENABLE-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: permpd_broadcast_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; DISABLE-NEXT:    vpermpd (%rdi){1to8}, %zmm1, %zmm0
; DISABLE-NEXT:    vcvtqq2pd %zmm1, %zmm1
; DISABLE-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; DISABLE-NEXT:    retq
  ; Clobber xmm1-xmm31 to constrain register allocation around the permute.
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  ; Scalar load splatted to all 8 lanes -> folds into the {1to8} broadcast operand.
  %v0 = load double, ptr %p0, align 4
  %t0 = insertelement <8 x double> undef, double %v0, i64 0
  %a0 = shufflevector <8 x double> %t0, <8 x double> undef, <8 x i32> zeroinitializer
  %2 = call <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double> %a0, <8 x i64> %idx)
  ; Keep %idx live past the permute.
  %a1 = sitofp <8 x i64> %idx to <8 x double>
  %res = fadd <8 x double> %2, %a1
  ret <8 x double> %res
}
| |
; Zero-masked 512-bit VPERMPD ({%k1} {z}). ENABLE still emits a vpxor on
; the destination ahead of the permute; DISABLE relies on {z} alone.
define <8 x double> @permpd_maskz_512(<8 x double> %a0, <8 x i64> %idx, ptr %mask) {
; ENABLE-LABEL: permpd_maskz_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    kmovb (%rdi), %k1
; ENABLE-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; ENABLE-NEXT:    vpermpd %zmm0, %zmm1, %zmm2 {%k1} {z}
; ENABLE-NEXT:    vcvtqq2pd %zmm1, %zmm1
; ENABLE-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; ENABLE-NEXT:    vaddpd %zmm0, %zmm2, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: permpd_maskz_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    kmovb (%rdi), %k1
; DISABLE-NEXT:    vpermpd %zmm0, %zmm1, %zmm2 {%k1} {z}
; DISABLE-NEXT:    vcvtqq2pd %zmm1, %zmm1
; DISABLE-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; DISABLE-NEXT:    vaddpd %zmm0, %zmm2, %zmm0
; DISABLE-NEXT:    retq
  ; Clobber list starts at xmm3: xmm0/xmm1 carry %a0/%idx and xmm2 is the permute destination.
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %2 = load i8, ptr %mask
  %3 = call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> %idx, <8 x double> zeroinitializer, i8 %2)
  ; Keep %a0 and %idx live past the permute.
  %a1 = sitofp <8 x i64> %idx to <8 x double>
  %t = fadd <8 x double> %a0, %a1
  %res = fadd <8 x double> %3, %t
  ret <8 x double> %res
}
| |
| declare <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double>, <8 x i64>) |
| declare <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double>, <8 x i64>, <8 x double>, i8) |
| |
| |
; Register-register VPERMPS (256-bit). The permute source is the inline-asm
; result; ENABLE zeroes the destination with vxorps before the permute,
; DISABLE does not.
define <8 x float> @permps_rr_256(<8 x float> %a0, <8 x i32> %idx) {
; ENABLE-LABEL: permps_rr_256:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovaps %ymm0, %ymm2
; ENABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vpermps %ymm2, %ymm0, %ymm1
; ENABLE-NEXT:    vcvtdq2ps %ymm0, %ymm0
; ENABLE-NEXT:    vaddps %ymm0, %ymm2, %ymm0
; ENABLE-NEXT:    vaddps %ymm0, %ymm1, %ymm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: permps_rr_256:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovaps %ymm0, %ymm2
; DISABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; DISABLE-NEXT:    vpermps %ymm2, %ymm0, %ymm1
; DISABLE-NEXT:    vcvtdq2ps %ymm0, %ymm0
; DISABLE-NEXT:    vaddps %ymm0, %ymm2, %ymm0
; DISABLE-NEXT:    vaddps %ymm0, %ymm1, %ymm0
; DISABLE-NEXT:    retq
  ; The asm both produces the permute source (=x) and clobbers xmm1-xmm31.
  %1 = tail call <8 x float> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  ; All-ones mask (-1) -> unmasked permute via the masked intrinsic.
  %2 = call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %1, <8 x i32> %idx, <8 x float> zeroinitializer, i8 -1)
  ; Keep %1 and %idx live past the permute.
  %a1 = sitofp <8 x i32> %idx to <8 x float>
  %t = fadd <8 x float> %1, %a1
  %res = fadd <8 x float> %2, %t
  ret <8 x float> %res
}
| |
; Memory-source VPERMPS (256-bit). ENABLE inserts a dependency-breaking
; vxorps on the destination before the folded-load permute; DISABLE does not.
define <8 x float> @permps_rm_256(ptr %p0, <8 x i32> %idx) {
; ENABLE-LABEL: permps_rm_256:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; ENABLE-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vpermps (%rdi), %ymm1, %ymm0
; ENABLE-NEXT:    vcvtdq2ps %ymm1, %ymm1
; ENABLE-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: permps_rm_256:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; DISABLE-NEXT:    vpermps (%rdi), %ymm1, %ymm0
; DISABLE-NEXT:    vcvtdq2ps %ymm1, %ymm1
; DISABLE-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; DISABLE-NEXT:    retq
  ; Clobber xmm1-xmm31 to constrain register allocation around the permute.
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  ; Full-width load folds into the permute's memory operand.
  %a0 = load <8 x float>, ptr %p0, align 64
  %2 = call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> %idx, <8 x float> zeroinitializer, i8 -1)
  ; Keep %idx live past the permute.
  %a1 = sitofp <8 x i32> %idx to <8 x float>
  %res = fadd <8 x float> %2, %a1
  ret <8 x float> %res
}
| |
; Broadcast-load VPERMPS ((%rdi){1to8}, 256-bit). ENABLE zeroes the
; destination with vxorps before the permute; DISABLE does not.
define <8 x float> @permps_broadcast_256(ptr %p0, <8 x i32> %idx) {
; ENABLE-LABEL: permps_broadcast_256:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; ENABLE-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vpermps (%rdi){1to8}, %ymm1, %ymm0
; ENABLE-NEXT:    vcvtdq2ps %ymm1, %ymm1
; ENABLE-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: permps_broadcast_256:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; DISABLE-NEXT:    vpermps (%rdi){1to8}, %ymm1, %ymm0
; DISABLE-NEXT:    vcvtdq2ps %ymm1, %ymm1
; DISABLE-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; DISABLE-NEXT:    retq
  ; Clobber xmm1-xmm31 to constrain register allocation around the permute.
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  ; Scalar load splatted to all 8 lanes -> folds into the {1to8} broadcast operand.
  %v0 = load float, ptr %p0, align 4
  %t0 = insertelement <8 x float> undef, float %v0, i32 0
  %a0 = shufflevector <8 x float> %t0, <8 x float> undef, <8 x i32> zeroinitializer
  %2 = call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> %idx, <8 x float> zeroinitializer, i8 -1)
  ; Keep %idx live past the permute.
  %a1 = sitofp <8 x i32> %idx to <8 x float>
  %res = fadd <8 x float> %2, %a1
  ret <8 x float> %res
}
| |
; Zero-masked VPERMPS ({%k1} {z}, 256-bit). ENABLE still emits a vxorps on
; the destination ahead of the permute; DISABLE relies on {z} alone.
define <8 x float> @permps_maskz_256(<8 x float> %a0, <8 x i32> %idx, ptr %mask) {
; ENABLE-LABEL: permps_maskz_256:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    kmovb (%rdi), %k1
; ENABLE-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; ENABLE-NEXT:    vpermps %ymm0, %ymm1, %ymm2 {%k1} {z}
; ENABLE-NEXT:    vcvtdq2ps %ymm1, %ymm1
; ENABLE-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; ENABLE-NEXT:    vaddps %ymm0, %ymm2, %ymm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: permps_maskz_256:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    kmovb (%rdi), %k1
; DISABLE-NEXT:    vpermps %ymm0, %ymm1, %ymm2 {%k1} {z}
; DISABLE-NEXT:    vcvtdq2ps %ymm1, %ymm1
; DISABLE-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; DISABLE-NEXT:    vaddps %ymm0, %ymm2, %ymm0
; DISABLE-NEXT:    retq
  ; Clobber list starts at xmm3: xmm0/xmm1 carry %a0/%idx and xmm2 is the permute destination.
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %2 = load i8, ptr %mask
  %3 = call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> %idx, <8 x float> zeroinitializer, i8 %2)
  ; Keep %a0 and %idx live past the permute.
  %a1 = sitofp <8 x i32> %idx to <8 x float>
  %t = fadd <8 x float> %a0, %a1
  %res = fadd <8 x float> %3, %t
  ret <8 x float> %res
}
| |
| declare <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float>, <8 x i32>, <8 x float>, i8) |
| |
; Register-register 512-bit VPERMPS. The permute source is the inline-asm
; result; ENABLE zeroes the destination with vpxor before the permute,
; DISABLE does not.
define <16 x float> @permps_rr_512(<16 x float> %a0, <16 x i32> %idx) {
; ENABLE-LABEL: permps_rr_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovaps %zmm0, %zmm2
; ENABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; ENABLE-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vpermps %zmm2, %zmm0, %zmm1
; ENABLE-NEXT:    vcvtdq2ps %zmm0, %zmm0
; ENABLE-NEXT:    vaddps %zmm0, %zmm2, %zmm0
; ENABLE-NEXT:    vaddps %zmm0, %zmm1, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: permps_rr_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovaps %zmm0, %zmm2
; DISABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; DISABLE-NEXT:    vpermps %zmm2, %zmm0, %zmm1
; DISABLE-NEXT:    vcvtdq2ps %zmm0, %zmm0
; DISABLE-NEXT:    vaddps %zmm0, %zmm2, %zmm0
; DISABLE-NEXT:    vaddps %zmm0, %zmm1, %zmm0
; DISABLE-NEXT:    retq
  ; The asm both produces the permute source (=x) and clobbers xmm1-xmm31.
  %1 = tail call <16 x float> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %2 = call <16 x float> @llvm.x86.avx512.permvar.sf.512(<16 x float> %1, <16 x i32> %idx)
  ; Keep %1 and %idx live past the permute.
  %a1 = sitofp <16 x i32> %idx to <16 x float>
  %t = fadd <16 x float> %1, %a1
  %res = fadd <16 x float> %2, %t
  ret <16 x float> %res
}
| |
; Memory-source 512-bit VPERMPS. ENABLE inserts a dependency-breaking vpxor
; on the destination before the folded-load permute; DISABLE does not.
define <16 x float> @permps_rm_512(ptr %p0, <16 x i32> %idx) {
; ENABLE-LABEL: permps_rm_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; ENABLE-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vpermps (%rdi), %zmm1, %zmm0
; ENABLE-NEXT:    vcvtdq2ps %zmm1, %zmm1
; ENABLE-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: permps_rm_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; DISABLE-NEXT:    vpermps (%rdi), %zmm1, %zmm0
; DISABLE-NEXT:    vcvtdq2ps %zmm1, %zmm1
; DISABLE-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; DISABLE-NEXT:    retq
  ; Clobber xmm1-xmm31 to constrain register allocation around the permute.
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  ; Aligned full-width load folds into the permute's memory operand.
  %a0 = load <16 x float>, ptr %p0, align 64
  %2 = call <16 x float> @llvm.x86.avx512.permvar.sf.512(<16 x float> %a0, <16 x i32> %idx)
  ; Keep %idx live past the permute.
  %a1 = sitofp <16 x i32> %idx to <16 x float>
  %res = fadd <16 x float> %2, %a1
  ret <16 x float> %res
}
| |
; Broadcast-load 512-bit VPERMPS ((%rdi){1to16}). ENABLE zeroes the
; destination with vpxor before the permute; DISABLE does not.
define <16 x float> @permps_broadcast_512(ptr %p0, <16 x i32> %idx) {
; ENABLE-LABEL: permps_broadcast_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; ENABLE-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vpermps (%rdi){1to16}, %zmm1, %zmm0
; ENABLE-NEXT:    vcvtdq2ps %zmm1, %zmm1
; ENABLE-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: permps_broadcast_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; DISABLE-NEXT:    vpermps (%rdi){1to16}, %zmm1, %zmm0
; DISABLE-NEXT:    vcvtdq2ps %zmm1, %zmm1
; DISABLE-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; DISABLE-NEXT:    retq
  ; Clobber xmm1-xmm31 to constrain register allocation around the permute.
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  ; Scalar load splatted to all 16 lanes -> folds into the {1to16} broadcast operand.
  %v0 = load float, ptr %p0, align 4
  %t0 = insertelement <16 x float> undef, float %v0, i32 0
  %a0 = shufflevector <16 x float> %t0, <16 x float> undef, <16 x i32> zeroinitializer
  %2 = call <16 x float> @llvm.x86.avx512.permvar.sf.512(<16 x float> %a0, <16 x i32> %idx)
  ; Keep %idx live past the permute.
  %a1 = sitofp <16 x i32> %idx to <16 x float>
  %res = fadd <16 x float> %2, %a1
  ret <16 x float> %res
}
| |
; Zero-masked 512-bit VPERMPS ({%k1} {z}, 16-bit mask via kmovw). ENABLE
; still emits a vpxor on the destination ahead of the permute; DISABLE
; relies on {z} alone.
define <16 x float> @permps_maskz_512(<16 x float> %a0, <16 x i32> %idx, ptr %mask) {
; ENABLE-LABEL: permps_maskz_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    kmovw (%rdi), %k1
; ENABLE-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; ENABLE-NEXT:    vpermps %zmm0, %zmm1, %zmm2 {%k1} {z}
; ENABLE-NEXT:    vcvtdq2ps %zmm1, %zmm1
; ENABLE-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; ENABLE-NEXT:    vaddps %zmm0, %zmm2, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: permps_maskz_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    kmovw (%rdi), %k1
; DISABLE-NEXT:    vpermps %zmm0, %zmm1, %zmm2 {%k1} {z}
; DISABLE-NEXT:    vcvtdq2ps %zmm1, %zmm1
; DISABLE-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; DISABLE-NEXT:    vaddps %zmm0, %zmm2, %zmm0
; DISABLE-NEXT:    retq
  ; Clobber list starts at xmm3: xmm0/xmm1 carry %a0/%idx and xmm2 is the permute destination.
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  ; 16-element vector -> i16 mask loaded from memory.
  %2 = load i16, ptr %mask
  %3 = call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> %idx, <16 x float> zeroinitializer, i16 %2)
  ; Keep %a0 and %idx live past the permute.
  %a1 = sitofp <16 x i32> %idx to <16 x float>
  %t = fadd <16 x float> %a0, %a1
  %res = fadd <16 x float> %3, %t
  ret <16 x float> %res
}
| |
| declare <16 x float> @llvm.x86.avx512.permvar.sf.512(<16 x float>, <16 x i32>) |
| declare <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float>, <16 x i32>, <16 x float>, i16) |