| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+amx-bf16,+amx-fp16,+amx-complex,+amx-transpose | FileCheck %s |
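; This file covers the transpose-flavored AMX intrinsics (AMX-TRANSPOSE,
; AMX-BF16, AMX-FP16, AMX-COMPLEX): the bare immediate-operand forms in
; @test_amx, and the shape-typed *.internal forms, which go through tile
; register allocation and tile configuration, in the remaining functions.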
| |
| define void @test_amx(i32 %rv32, i64 %stride, i64 %rvalue, i8* %addr1, <4 x float> %xmm) #0 { |
| ; CHECK-LABEL: test_amx: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: t2rpntlvwz0 (%rcx,%rsi), %tmm0 |
| ; CHECK-NEXT: t2rpntlvwz0t1 (%rcx,%rsi), %tmm2 |
| ; CHECK-NEXT: t2rpntlvwz1 (%rcx,%rsi), %tmm0 |
| ; CHECK-NEXT: t2rpntlvwz1t1 (%rcx,%rsi), %tmm2 |
| ; CHECK-NEXT: ttransposed %tmm3, %tmm1 |
| ; CHECK-NEXT: ttdpbf16ps %tmm3, %tmm2, %tmm1 |
| ; CHECK-NEXT: ttdpfp16ps %tmm6, %tmm5, %tmm4 |
| ; CHECK-NEXT: ttcmmimfp16ps %tmm3, %tmm2, %tmm1 |
| ; CHECK-NEXT: ttcmmrlfp16ps %tmm3, %tmm2, %tmm1 |
| ; CHECK-NEXT: tconjtcmmimfp16ps %tmm3, %tmm2, %tmm1 |
| ; CHECK-NEXT: tconjtfp16 %tmm2, %tmm1 |
| ; CHECK-NEXT: retq |
| call void @llvm.x86.t2rpntlvwz0(i8 1, i8* %addr1, i64 %stride) |
| call void @llvm.x86.t2rpntlvwz0t1(i8 2, i8* %addr1, i64 %stride) |
| call void @llvm.x86.t2rpntlvwz1(i8 1, i8* %addr1, i64 %stride) |
| call void @llvm.x86.t2rpntlvwz1t1(i8 2, i8* %addr1, i64 %stride) |
| call void @llvm.x86.ttransposed(i8 1, i8 3) |
| call void @llvm.x86.ttdpbf16ps(i8 1, i8 2, i8 3) |
| call void @llvm.x86.ttdpfp16ps(i8 4, i8 5, i8 6) |
| call void @llvm.x86.ttcmmimfp16ps(i8 1, i8 2, i8 3) |
| call void @llvm.x86.ttcmmrlfp16ps(i8 1, i8 2, i8 3) |
| call void @llvm.x86.tconjtcmmimfp16ps(i8 1, i8 2, i8 3) |
| call void @llvm.x86.tconjtfp16(i8 1, i8 2) |
| ret void |
| } |
| |
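; Declarations of the bare intrinsic forms used by @test_amx above. The i8
; tile operands are immediates, so they map directly to the tile (pair)
; register numbers in the CHECK lines and no tile configuration is emitted.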
| declare void @llvm.x86.t2rpntlvwz0(i8 %tile1, i8* %addr1, i64 %stride) |
| declare void @llvm.x86.t2rpntlvwz0t1(i8 %tile1, i8* %addr1, i64 %stride) |
| declare void @llvm.x86.t2rpntlvwz1(i8 %tile1, i8* %addr1, i64 %stride) |
| declare void @llvm.x86.t2rpntlvwz1t1(i8 %tile1, i8* %addr1, i64 %stride) |
| declare void @llvm.x86.ttransposed(i8 %tile0, i8 %tile1) |
| declare void @llvm.x86.ttdpbf16ps(i8 %tile0, i8 %tile1, i8 %tile2) |
| declare void @llvm.x86.ttdpfp16ps(i8 %tile0, i8 %tile1, i8 %tile2) |
| declare void @llvm.x86.ttcmmimfp16ps(i8 %A, i8 %B, i8 %C) |
| declare void @llvm.x86.ttcmmrlfp16ps(i8 %A, i8 %B, i8 %C) |
| declare void @llvm.x86.tconjtcmmimfp16ps(i8 %A, i8 %B, i8 %C) |
| declare void @llvm.x86.tconjtfp16(i8 %A, i8 %B) |
| |
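; @test_amx2 exercises the shape-typed *.internal transpose dot-product
; intrinsics. Lowering materializes a tile configuration on the stack
; (ldtilecfg). Note the 1024-byte spill of %tmm2 and reload into %tmm3: the
; accumulator %c4 feeds tconjtcmmimfp16ps, which overwrites its destination,
; while the original value must survive for the final tilestored.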
| define void @test_amx2(i8* %pointer, i8* %base, i64 %stride) #0 { |
| ; CHECK-LABEL: test_amx2: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: pushq %rbp |
| ; CHECK-NEXT: subq $2928, %rsp # imm = 0xB70 |
| ; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0 |
| ; CHECK-NEXT: vmovups %zmm0, {{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movb $1, {{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movb $8, {{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movw $8, {{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movb $8, {{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movw $8, {{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movb $8, {{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movw $8, {{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movb $8, {{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movw $8, {{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: ldtilecfg {{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movw $8, %ax |
| ; CHECK-NEXT: tileloadd (%rsi,%rdx), %tmm0 |
| ; CHECK-NEXT: tilezero %tmm1 |
| ; CHECK-NEXT: tilezero %tmm2 |
| ; CHECK-NEXT: ttdpbf16ps %tmm1, %tmm0, %tmm2 |
| ; CHECK-NEXT: ttdpfp16ps %tmm1, %tmm0, %tmm2 |
| ; CHECK-NEXT: ttcmmimfp16ps %tmm1, %tmm0, %tmm2 |
| ; CHECK-NEXT: ttcmmrlfp16ps %tmm1, %tmm0, %tmm2 |
| ; CHECK-NEXT: movabsq $64, %rbp |
| ; CHECK-NEXT: tilestored %tmm2, 896(%rsp,%rbp) # 1024-byte Folded Spill |
| ; CHECK-NEXT: tileloadd 896(%rsp,%rbp), %tmm3 # 1024-byte Folded Reload |
| ; CHECK-NEXT: tconjtcmmimfp16ps %tmm1, %tmm0, %tmm3 |
| ; CHECK-NEXT: tconjtfp16 %tmm3, %tmm0 |
| ; CHECK-NEXT: tilestored %tmm2, (%rdi,%rdx) |
| ; CHECK-NEXT: addq $2928, %rsp # imm = 0xB70 |
| ; CHECK-NEXT: popq %rbp |
| ; CHECK-NEXT: tilerelease |
| ; CHECK-NEXT: vzeroupper |
| ; CHECK-NEXT: retq |
| %a = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 8, i8* %base, i64 %stride) |
| %b = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8) |
| %c = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8) |
| %c1 = call x86_amx @llvm.x86.ttdpbf16ps.internal(i16 8, i16 8, i16 8, x86_amx %c, x86_amx %a, x86_amx %b) |
| %c2 = call x86_amx @llvm.x86.ttdpfp16ps.internal(i16 8, i16 8, i16 8, x86_amx %c1, x86_amx %a, x86_amx %b) |
| %c3 = call x86_amx @llvm.x86.ttcmmimfp16ps.internal(i16 8, i16 8, i16 8, x86_amx %c2, x86_amx %a, x86_amx %b) |
| %c4 = call x86_amx @llvm.x86.ttcmmrlfp16ps.internal(i16 8, i16 8, i16 8, x86_amx %c3, x86_amx %a, x86_amx %b) |
| %c5 = call x86_amx @llvm.x86.tconjtcmmimfp16ps.internal(i16 8, i16 8, i16 8, x86_amx %c4, x86_amx %a, x86_amx %b) |
| %c6 = call x86_amx @llvm.x86.tconjtfp16.internal(i16 8, i16 8, x86_amx %c5) |
| |
| call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %pointer, i64 %stride, x86_amx %c4) |
| ret void |
| } |
| |
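; @test_amx3 checks the tile-pair loads: each t2rpntlvwz*.internal returns a
; { x86_amx, x86_amx } pair, and ttransposed consumes one element of the pair.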
| define void @test_amx3(i8* %pointer, i8* %base, i64 %stride) #0 { |
| ; CHECK-LABEL: test_amx3: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0 |
| ; CHECK-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movb $1, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movb $8, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movw $8, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movb $8, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movw $8, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movb $0, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movw $0, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: ldtilecfg -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: xorl %eax, %eax |
| ; CHECK-NEXT: movw $8, %cx |
| ; CHECK-NEXT: t2rpntlvwz0 (%rsi,%rdx), %tmm4 |
| ; CHECK-NEXT: t2rpntlvwz0t1 (%rsi,%rdx), %tmm4 |
| ; CHECK-NEXT: t2rpntlvwz1 (%rsi,%rdx), %tmm4 |
| ; CHECK-NEXT: t2rpntlvwz1t1 (%rsi,%rdx), %tmm4 |
| ; CHECK-NEXT: ttransposed %tmm4, %tmm0 |
| ; CHECK-NEXT: tilestored %tmm0, (%rdi,%rdx) |
| ; CHECK-NEXT: tilerelease |
| ; CHECK-NEXT: vzeroupper |
| ; CHECK-NEXT: retq |
| %1 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0.internal(i16 8, i16 8, i16 0, i8* %base, i64 %stride) |
| %2 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0t1.internal(i16 8, i16 8, i16 0, i8* %base, i64 %stride) |
| %3 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1.internal(i16 8, i16 8, i16 0, i8* %base, i64 %stride) |
| %4 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1t1.internal(i16 8, i16 8, i16 0, i8* %base, i64 %stride) |
| %5 = extractvalue { x86_amx, x86_amx } %4, 0 |
| %6 = call x86_amx @llvm.x86.ttransposed.internal(i16 8, i16 8, x86_amx %5) |
| call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %pointer, i64 %stride, x86_amx %6) |
| ret void |
| } |
| |
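; @test_amx_spill keeps five tile pairs live at once, more than the eight
; tile registers can hold, so the register allocator must spill and reload
; whole 1024-byte tiles between the pair loads and the stores.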
| define void @test_amx_spill(i8* %pointer, i8* %base, i64 %stride) #0 { |
| ; CHECK-LABEL: test_amx_spill: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: subq $6088, %rsp # imm = 0x17C8 |
| ; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0 |
| ; CHECK-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movb $1, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movb $8, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movw $8, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movb $8, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movw $8, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movb $8, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movw $8, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movb $8, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movw $8, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movb $8, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movw $8, -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: ldtilecfg -{{[0-9]+}}(%rsp) |
| ; CHECK-NEXT: movw $8, %ax |
| ; CHECK-NEXT: tileloadd (%rsi,%rdx), %tmm0 |
| ; CHECK-NEXT: t2rpntlvwz0 (%rsi,%rdx), %tmm4 |
| ; CHECK-NEXT: t2rpntlvwz0t1 (%rsi,%rdx), %tmm6 |
| ; CHECK-NEXT: movabsq $64, %rcx |
| ; CHECK-NEXT: tilestored %tmm6, 4032(%rsp,%rcx) # 1024-byte Folded Spill |
| ; CHECK-NEXT: tilestored %tmm7, 5056(%rsp,%rcx) # 1024-byte Folded Spill |
| ; CHECK-NEXT: t2rpntlvwz1 (%rsi,%rdx), %tmm6 |
| ; CHECK-NEXT: tilestored %tmm6, 1984(%rsp,%rcx) # 1024-byte Folded Spill |
| ; CHECK-NEXT: tilestored %tmm7, 3008(%rsp,%rcx) # 1024-byte Folded Spill |
| ; CHECK-NEXT: t2rpntlvwz1t1 (%rsi,%rdx), %tmm6 |
| ; CHECK-NEXT: tilestored %tmm6, -64(%rsp,%rcx) # 1024-byte Folded Spill |
| ; CHECK-NEXT: tilestored %tmm7, 960(%rsp,%rcx) # 1024-byte Folded Spill |
| ; CHECK-NEXT: t2rpntlvwz0 (%rsi,%rdx), %tmm6 |
| ; CHECK-NEXT: tilestored %tmm4, (%rsi,%rdx) |
| ; CHECK-NEXT: tilestored %tmm5, (%rsi,%rdx) |
| ; CHECK-NEXT: tileloadd 4032(%rsp,%rcx), %tmm4 # 1024-byte Folded Reload |
| ; CHECK-NEXT: tileloadd 5056(%rsp,%rcx), %tmm5 # 1024-byte Folded Reload |
| ; CHECK-NEXT: tilestored %tmm4, (%rsi,%rdx) |
| ; CHECK-NEXT: tilestored %tmm5, (%rsi,%rdx) |
| ; CHECK-NEXT: tileloadd 1984(%rsp,%rcx), %tmm4 # 1024-byte Folded Reload |
| ; CHECK-NEXT: tileloadd 3008(%rsp,%rcx), %tmm5 # 1024-byte Folded Reload |
| ; CHECK-NEXT: tilestored %tmm4, (%rsi,%rdx) |
| ; CHECK-NEXT: tilestored %tmm5, (%rsi,%rdx) |
| ; CHECK-NEXT: tileloadd -64(%rsp,%rcx), %tmm4 # 1024-byte Folded Reload |
| ; CHECK-NEXT: tileloadd 960(%rsp,%rcx), %tmm5 # 1024-byte Folded Reload |
| ; CHECK-NEXT: tilestored %tmm4, (%rsi,%rdx) |
| ; CHECK-NEXT: tilestored %tmm5, (%rsi,%rdx) |
| ; CHECK-NEXT: tilestored %tmm6, (%rsi,%rdx) |
| ; CHECK-NEXT: tilestored %tmm7, (%rsi,%rdx) |
| ; CHECK-NEXT: addq $6088, %rsp # imm = 0x17C8 |
| ; CHECK-NEXT: tilerelease |
| ; CHECK-NEXT: vzeroupper |
| ; CHECK-NEXT: retq |
| %a = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 8, i8* %base, i64 %stride) |
| %b1 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0.internal(i16 8, i16 8, i16 8, i8* %base, i64 %stride) |
| %b2 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0t1.internal(i16 8, i16 8, i16 8, i8* %base, i64 %stride) |
| %b3 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1.internal(i16 8, i16 8, i16 8, i8* %base, i64 %stride) |
| %b4 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1t1.internal(i16 8, i16 8, i16 8, i8* %base, i64 %stride) |
| %b5 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0.internal(i16 8, i16 8, i16 8, i8* %base, i64 %stride) |
| %e11 = extractvalue { x86_amx, x86_amx } %b1, 0 |
| %e12 = extractvalue { x86_amx, x86_amx } %b1, 1 |
| %e21 = extractvalue { x86_amx, x86_amx } %b2, 0 |
| %e22 = extractvalue { x86_amx, x86_amx } %b2, 1 |
| %e31 = extractvalue { x86_amx, x86_amx } %b3, 0 |
| %e32 = extractvalue { x86_amx, x86_amx } %b3, 1 |
| %e41 = extractvalue { x86_amx, x86_amx } %b4, 0 |
| %e42 = extractvalue { x86_amx, x86_amx } %b4, 1 |
| %e51 = extractvalue { x86_amx, x86_amx } %b5, 0 |
| %e52 = extractvalue { x86_amx, x86_amx } %b5, 1 |
| call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e11) |
| call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e12) |
| call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e21) |
| call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e22) |
| call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e31) |
| call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e32) |
| call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e41) |
| call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e42) |
| call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e51) |
| call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e52) |
| ret void |
| } |
| |
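; Declarations of the shape-typed internal intrinsics: the leading i16
; operands are row/column counts that drive the tile configuration, rather
; than immediate tile register numbers.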
| declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64) |
| declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx) |
| declare { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0.internal(i16, i16, i16, i8*, i64) |
| declare { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0t1.internal(i16, i16, i16, i8*, i64) |
| declare { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1.internal(i16, i16, i16, i8*, i64) |
| declare { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1t1.internal(i16, i16, i16, i8*, i64) |
| declare x86_amx @llvm.x86.ttransposed.internal(i16, i16, x86_amx) |
| declare x86_amx @llvm.x86.ttdpbf16ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx) |
| declare x86_amx @llvm.x86.ttdpfp16ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx) |
| declare x86_amx @llvm.x86.ttcmmimfp16ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx) |
| declare x86_amx @llvm.x86.ttcmmrlfp16ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx) |
| declare x86_amx @llvm.x86.tconjtcmmimfp16ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx) |
| declare x86_amx @llvm.x86.tconjtfp16.internal(i16, i16, x86_amx) |
| |
| attributes #0 = { nounwind } |