; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+amx-tile,+avx512f \
; RUN:   -mattr=+amx-fp8 -verify-machineinstrs | FileCheck %s

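; Sanity check for AMX-FP8 codegen: each of the four FP8 tile dot-product
; intrinsics should select its corresponding tile instruction, with the tile
; configuration materialized on the stack and loaded via ldtilecfg.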
define void @test_amx(ptr %pointer, ptr %base, i64 %stride) {
; CHECK-LABEL: test_amx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbp
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    subq $3952, %rsp # imm = 0xF70
; CHECK-NEXT:    .cfi_def_cfa_offset 3968
; CHECK-NEXT:    .cfi_offset %rbp, -16
; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovups %zmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $1, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    ldtilecfg {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, %ax
; CHECK-NEXT:    tileloadd (%rsi,%rdx), %tmm0
; CHECK-NEXT:    tilezero %tmm1
; CHECK-NEXT:    tilezero %tmm2
; CHECK-NEXT:    movabsq $64, %rbp
; CHECK-NEXT:    tilestored %tmm2, 896(%rsp,%rbp) # 1024-byte Folded Spill
; CHECK-NEXT:    tileloadd 896(%rsp,%rbp), %tmm3 # 1024-byte Folded Reload
; CHECK-NEXT:    tdpbf8ps %tmm1, %tmm0, %tmm3
; CHECK-NEXT:    tdpbhf8ps %tmm1, %tmm0, %tmm3
; CHECK-NEXT:    tilestored %tmm2, 1920(%rsp,%rbp) # 1024-byte Folded Spill
; CHECK-NEXT:    tileloadd 1920(%rsp,%rbp), %tmm4 # 1024-byte Folded Reload
; CHECK-NEXT:    tdphbf8ps %tmm1, %tmm0, %tmm4
; CHECK-NEXT:    tdphf8ps %tmm1, %tmm0, %tmm2
; CHECK-NEXT:    tilestored %tmm3, (%rdi,%rdx)
; CHECK-NEXT:    addq $3952, %rsp # imm = 0xF70
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    popq %rbp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    tilerelease
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq

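  ; Load one source tile, zero the other source and the accumulator, and run
  ; all four FP8 dot-product variants. Only %c2 is stored; %c3 and %c4 are
  ; intentionally left unused, since the test only checks that each intrinsic
  ; selects its instruction.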
  %a = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 8, ptr %base, i64 %stride)
  %b = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8)
  %c = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8)

  %c1 = call x86_amx @llvm.x86.tdpbf8ps.internal(i16 8, i16 8, i16 8, x86_amx %c, x86_amx %a, x86_amx %b)
  %c2 = call x86_amx @llvm.x86.tdpbhf8ps.internal(i16 8, i16 8, i16 8, x86_amx %c1, x86_amx %a, x86_amx %b)
  %c3 = call x86_amx @llvm.x86.tdphbf8ps.internal(i16 8, i16 8, i16 8, x86_amx %c, x86_amx %a, x86_amx %b)
  %c4 = call x86_amx @llvm.x86.tdphf8ps.internal(i16 8, i16 8, i16 8, x86_amx %c, x86_amx %a, x86_amx %b)

  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, ptr %pointer, i64 %stride, x86_amx %c2)
  ret void
}

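; Models the lowering of a C-level __tile_dpbf8ps-style helper: %dst, %src1,
; and %src2 point to tile descriptors with the row count at offset 0, the
; column count at offset 2, and the 1024-byte tile data at offset 64, moved
; between memory and tile registers via the vector<->tile cast intrinsics.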
; Function Attrs: nounwind
define dso_local void @__tile_dpbf8ps(ptr %dst, ptr %src1, ptr %src2) #0 {
; CHECK-LABEL: __tile_dpbf8ps:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rbp
; CHECK-NEXT:    subq $4976, %rsp # imm = 0x1370
; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovups %zmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $1, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movzwl (%rsi), %eax
; CHECK-NEXT:    movb %al, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb %al, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb %al, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movswq 2(%rdx), %rcx
; CHECK-NEXT:    movw %cx, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw %cx, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw %cx, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movswq 2(%rsi), %r8
; CHECK-NEXT:    movw %r8w, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movzwl %r8w, %r9d
; CHECK-NEXT:    movb %r9b, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    shrl $2, %r9d
; CHECK-NEXT:    movb %r9b, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    ldtilecfg {{[0-9]+}}(%rsp)
; CHECK-NEXT:    addq $64, %rdi
; CHECK-NEXT:    tileloadd (%rdi,%rcx), %tmm0
; CHECK-NEXT:    addq $64, %rsi
; CHECK-NEXT:    tileloadd (%rsi,%r8), %tmm1
; CHECK-NEXT:    addq $64, %rdx
; CHECK-NEXT:    tileloadd (%rdx,%rcx), %tmm2
; CHECK-NEXT:    movabsq $64, %rbp
; CHECK-NEXT:    tilestored %tmm0, 896(%rsp,%rbp) # 1024-byte Folded Spill
; CHECK-NEXT:    tileloadd 896(%rsp,%rbp), %tmm3 # 1024-byte Folded Reload
; CHECK-NEXT:    tdpbf8ps %tmm2, %tmm1, %tmm3
; CHECK-NEXT:    tilestored %tmm3, (%rdi,%rcx)
; CHECK-NEXT:    tilestored %tmm0, 1920(%rsp,%rbp) # 1024-byte Folded Spill
; CHECK-NEXT:    tileloadd 1920(%rsp,%rbp), %tmm3 # 1024-byte Folded Reload
; CHECK-NEXT:    tdpbhf8ps %tmm2, %tmm1, %tmm3
; CHECK-NEXT:    tilestored %tmm3, (%rdi,%rcx)
; CHECK-NEXT:    tilestored %tmm0, 2944(%rsp,%rbp) # 1024-byte Folded Spill
; CHECK-NEXT:    tileloadd 2944(%rsp,%rbp), %tmm3 # 1024-byte Folded Reload
; CHECK-NEXT:    tdphbf8ps %tmm2, %tmm1, %tmm3
; CHECK-NEXT:    tilestored %tmm3, (%rdi,%rcx)
; CHECK-NEXT:    tdphf8ps %tmm2, %tmm1, %tmm0
; CHECK-NEXT:    tilestored %tmm0, (%rdi,%rcx)
; CHECK-NEXT:    addq $4976, %rsp # imm = 0x1370
; CHECK-NEXT:    popq %rbp
; CHECK-NEXT:    tilerelease
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  %0 = load i16, ptr %src1, align 64
  %col = getelementptr inbounds nuw i8, ptr %src2, i64 2
  %1 = load i16, ptr %col, align 2
  %col1 = getelementptr inbounds nuw i8, ptr %src1, i64 2
  %2 = load i16, ptr %col1, align 2
  %tile = getelementptr inbounds nuw i8, ptr %dst, i64 64
  %3 = load <256 x i32>, ptr %tile, align 64
  %tile2 = getelementptr inbounds nuw i8, ptr %src1, i64 64
  %4 = load <256 x i32>, ptr %tile2, align 64
  %tile3 = getelementptr inbounds nuw i8, ptr %src2, i64 64
  %5 = load <256 x i32>, ptr %tile3, align 64
  %6 = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %3)
  %7 = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %4)
  %8 = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %5)

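  ; The mnemonics spell the operand formats (b = bf8, h = hf8), so the four
  ; calls below should cover the bf8*bf8, bf8*hf8, hf8*bf8, and hf8*hf8
  ; combinations, each accumulating into the same fp32 destination tile.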
  %9 = tail call x86_amx @llvm.x86.tdpbf8ps.internal(i16 %0, i16 %1, i16 %2, x86_amx %6, x86_amx %7, x86_amx %8)
  %10 = tail call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %9)
  store <256 x i32> %10, ptr %tile, align 64

  %11 = tail call x86_amx @llvm.x86.tdpbhf8ps.internal(i16 %0, i16 %1, i16 %2, x86_amx %6, x86_amx %7, x86_amx %8)
  %12 = tail call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %11)
  store <256 x i32> %12, ptr %tile, align 64

  %13 = tail call x86_amx @llvm.x86.tdphbf8ps.internal(i16 %0, i16 %1, i16 %2, x86_amx %6, x86_amx %7, x86_amx %8)
  %14 = tail call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %13)
  store <256 x i32> %14, ptr %tile, align 64

  %15 = tail call x86_amx @llvm.x86.tdphf8ps.internal(i16 %0, i16 %1, i16 %2, x86_amx %6, x86_amx %7, x86_amx %8)
  %16 = tail call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %15)
  store <256 x i32> %16, ptr %tile, align 64

  ret void
}

declare x86_amx @llvm.x86.tilezero.internal(i16, i16)
declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64)
declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx)

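; AMX-FP8 dot-product intrinsics: the shape operands come first, followed by
; the accumulator tile and the two source tiles.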
declare x86_amx @llvm.x86.tdpbf8ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
declare x86_amx @llvm.x86.tdpbhf8ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
declare x86_amx @llvm.x86.tdphbf8ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
declare x86_amx @llvm.x86.tdphf8ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)

attributes #0 = { nounwind }