; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+amx-tile,+avx512f \
; RUN: -mattr=+amx-fp8 -verify-machineinstrs | FileCheck %s

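; Test lowering of the AMX-FP8 dot-product intrinsics. Each
; llvm.x86.tdp*f8ps.internal call should select to the matching tile
; instruction, with the tile configuration materialized on the stack for
; ldtilecfg, and tile spills/reloads emitted as tilestored/tileloadd pairs
; using a 64-byte stride.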
define void @test_amx(i8* %pointer, i8* %base, i64 %stride) {
; CHECK-LABEL: test_amx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbp
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    subq $3952, %rsp # imm = 0xF70
; CHECK-NEXT:    .cfi_def_cfa_offset 3968
; CHECK-NEXT:    .cfi_offset %rbp, -16
; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovups %zmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $1, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    ldtilecfg {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, %ax
; CHECK-NEXT:    tileloadd (%rsi,%rdx), %tmm0
; CHECK-NEXT:    tilezero %tmm1
; CHECK-NEXT:    tilezero %tmm2
; CHECK-NEXT:    movabsq $64, %rbp
; CHECK-NEXT:    tilestored %tmm2, 896(%rsp,%rbp) # 1024-byte Folded Spill
; CHECK-NEXT:    tileloadd 896(%rsp,%rbp), %tmm3 # 1024-byte Folded Reload
; CHECK-NEXT:    tdpbf8ps %tmm1, %tmm0, %tmm3
; CHECK-NEXT:    tdpbhf8ps %tmm1, %tmm0, %tmm3
; CHECK-NEXT:    tilestored %tmm2, 1920(%rsp,%rbp) # 1024-byte Folded Spill
; CHECK-NEXT:    tileloadd 1920(%rsp,%rbp), %tmm4 # 1024-byte Folded Reload
; CHECK-NEXT:    tdphbf8ps %tmm1, %tmm0, %tmm4
; CHECK-NEXT:    tdphf8ps %tmm1, %tmm0, %tmm2
; CHECK-NEXT:    tilestored %tmm3, (%rdi,%rdx)
; CHECK-NEXT:    addq $3952, %rsp # imm = 0xF70
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    popq %rbp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    tilerelease
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq

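  ; Operands: %a is loaded from memory; %b and %c start as zeroed tiles.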
  %a = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 8, i8* %base, i64 %stride)
  %b = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8)
  %c = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8)

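  ; All four FP8 dot-product variants: the mnemonic encodes the source
  ; element formats (b = bf8, h = hf8); each accumulates into a
  ; single-precision tile.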
  %c1 = call x86_amx @llvm.x86.tdpbf8ps.internal(i16 8, i16 8, i16 8, x86_amx %c, x86_amx %a, x86_amx %b)
  %c2 = call x86_amx @llvm.x86.tdpbhf8ps.internal(i16 8, i16 8, i16 8, x86_amx %c1, x86_amx %a, x86_amx %b)
  %c3 = call x86_amx @llvm.x86.tdphbf8ps.internal(i16 8, i16 8, i16 8, x86_amx %c, x86_amx %a, x86_amx %b)
  %c4 = call x86_amx @llvm.x86.tdphf8ps.internal(i16 8, i16 8, i16 8, x86_amx %c, x86_amx %a, x86_amx %b)

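  ; Only %c2 is stored back, but the CHECK lines above require all four
  ; tdp instructions to appear in the output.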
  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %pointer, i64 %stride, x86_amx %c2)
  ret void
}

declare x86_amx @llvm.x86.tilezero.internal(i16, i16)
declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)
declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)

declare x86_amx @llvm.x86.tdpbf8ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
declare x86_amx @llvm.x86.tdpbhf8ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
declare x86_amx @llvm.x86.tdphbf8ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
declare x86_amx @llvm.x86.tdphf8ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)