| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 |
| ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2,+fp8 -verify-machineinstrs -force-streaming -enable-subreg-liveness < %s | FileCheck %s |
| |
| ; FCVT / FCVTN / BFCVT |
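; Narrowing conversions to FP8: the x2 forms convert two half-precision
; (FCVT) or BFloat16 (BFCVT) vectors, and the x4 forms (FCVT and FCVTN)
; convert four single-precision vectors, each producing a single vector
; of FP8 bytes.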
| |
| define <vscale x 16 x i8> @fcvt_x2(<vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1) { |
| ; CHECK-LABEL: fcvt_x2: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: fcvt z0.b, { z0.h, z1.h } |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvt.x2.nxv8f16(<vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1) |
| ret <vscale x 16 x i8> %res |
| } |
| |
| define <vscale x 16 x i8> @fcvt_x4(<vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3) { |
| ; CHECK-LABEL: fcvt_x4: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: fcvt z0.b, { z0.s - z3.s } |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvt.x4(<vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, |
| <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3) |
| ret <vscale x 16 x i8> %res |
| } |
| |
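; Check that values loaded into strided multi-vector tuples feed the x4
; converts directly, without intervening register copies.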
| define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @fcvt_x4_tuple(i64 %stride, ptr %ptr) { |
| ; CHECK-LABEL: fcvt_x4_tuple: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill |
| ; CHECK-NEXT: addvl sp, sp, #-9 |
| ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill |
| ; CHECK-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG |
| ; CHECK-NEXT: .cfi_offset w29, -16 |
| ; CHECK-NEXT: lsl x8, x0, #1 |
| ; CHECK-NEXT: add x9, x1, x0 |
| ; CHECK-NEXT: ptrue pn8.b |
| ; CHECK-NEXT: ld1w { z16.s, z20.s, z24.s, z28.s }, pn8/z, [x1] |
| ; CHECK-NEXT: ld1w { z17.s, z21.s, z25.s, z29.s }, pn8/z, [x9] |
| ; CHECK-NEXT: add x10, x1, x8 |
| ; CHECK-NEXT: add x8, x9, x8 |
| ; CHECK-NEXT: ld1w { z18.s, z22.s, z26.s, z30.s }, pn8/z, [x10] |
| ; CHECK-NEXT: ld1w { z19.s, z23.s, z27.s, z31.s }, pn8/z, [x8] |
| ; CHECK-NEXT: fcvt z0.b, { z16.s - z19.s } |
| ; CHECK-NEXT: fcvt z1.b, { z20.s - z23.s } |
| ; CHECK-NEXT: fcvt z2.b, { z24.s - z27.s } |
| ; CHECK-NEXT: fcvt z3.b, { z28.s - z31.s } |
| ; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload |
| ; CHECK-NEXT: addvl sp, sp, #9 |
| ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() |
| %1 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %0, ptr %ptr) |
| %2 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %1, 0 |
| %3 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %1, 1 |
| %4 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %1, 2 |
| %5 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %1, 3 |
| %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride |
| %6 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %0, ptr %arrayidx2) |
| %7 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %6, 0 |
| %8 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %6, 1 |
| %9 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %6, 2 |
| %10 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %6, 3 |
| %mul3 = shl i64 %stride, 1 |
| %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3 |
| %11 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %0, ptr %arrayidx4) |
| %12 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %11, 0 |
| %13 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %11, 1 |
| %14 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %11, 2 |
| %15 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %11, 3 |
| %mul5 = mul i64 %stride, 3 |
| %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5 |
| %16 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %0, ptr %arrayidx6) |
| %17 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %16, 0 |
| %18 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %16, 1 |
| %19 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %16, 2 |
| %20 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %16, 3 |
| %res1 = call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvt.x4(<vscale x 4 x float> %2, <vscale x 4 x float> %7, <vscale x 4 x float> %12, <vscale x 4 x float> %17) |
| %res2 = call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvt.x4(<vscale x 4 x float> %3, <vscale x 4 x float> %8, <vscale x 4 x float> %13, <vscale x 4 x float> %18) |
| %res3 = call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvt.x4(<vscale x 4 x float> %4, <vscale x 4 x float> %9, <vscale x 4 x float> %14, <vscale x 4 x float> %19) |
| %res4 = call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvt.x4(<vscale x 4 x float> %5, <vscale x 4 x float> %10, <vscale x 4 x float> %15, <vscale x 4 x float> %20) |
| %ins1 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> %res1, 0 |
| %ins2 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %ins1, <vscale x 16 x i8> %res2, 1 |
| %ins3 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %ins2, <vscale x 16 x i8> %res3, 2 |
| %ins4 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %ins3, <vscale x 16 x i8> %res4, 3 |
| ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %ins4 |
| } |
| |
| define <vscale x 16 x i8> @fcvtn(<vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3) { |
| ; CHECK-LABEL: fcvtn: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: fcvtn z0.b, { z0.s - z3.s } |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvtn.x4(<vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, |
| <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3) |
| ret <vscale x 16 x i8> %res |
| } |
| |
| define <vscale x 16 x i8> @bfcvt(<vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1) { |
| ; CHECK-LABEL: bfcvt: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: bfcvt z0.b, { z0.h, z1.h } |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvt.x2.nxv8bf16(<vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1) |
| ret <vscale x 16 x i8> %res |
| } |
| |
| |
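; As above, but for the two-vector BFCVT form.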
| define { <vscale x 16 x i8>, <vscale x 16 x i8> } @bfcvt_tuple(i64 %stride, ptr %ptr) { |
| ; CHECK-LABEL: bfcvt_tuple: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill |
| ; CHECK-NEXT: addvl sp, sp, #-3 |
| ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill |
| ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG |
| ; CHECK-NEXT: .cfi_offset w29, -16 |
| ; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * VG - 16 |
| ; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16 |
| ; CHECK-NEXT: ptrue pn8.b |
| ; CHECK-NEXT: add x8, x1, x0 |
| ; CHECK-NEXT: ld1h { z2.h, z10.h }, pn8/z, [x1] |
| ; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x8] |
| ; CHECK-NEXT: bfcvt z0.b, { z2.h, z3.h } |
| ; CHECK-NEXT: bfcvt z1.b, { z10.h, z11.h } |
| ; CHECK-NEXT: ldr z11, [sp, #1, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z10, [sp, #2, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload |
| ; CHECK-NEXT: addvl sp, sp, #3 |
| ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() |
| %1 = tail call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld1.pn.x2.nxv8bf16(target("aarch64.svcount") %0, ptr %ptr) |
| %2 = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %1, 0 |
| %3 = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %1, 1 |
| %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride |
| %4 = tail call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld1.pn.x2.nxv8bf16(target("aarch64.svcount") %0, ptr %arrayidx2) |
| %5 = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %4, 0 |
| %6 = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %4, 1 |
| %res1 = call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvt.x2.nxv8bf16(<vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %5) |
| %res2 = call <vscale x 16 x i8> @llvm.aarch64.sve.fp8.cvt.x2.nxv8bf16(<vscale x 8 x bfloat> %3, <vscale x 8 x bfloat> %6) |
| %ins1 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> %res1, 0 |
| %ins2 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %ins1, <vscale x 16 x i8> %res2, 1 |
| ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %ins2 |
| } |
| |
| ; F1CVT / F2CVT |
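; Widening conversions from FP8: a single vector of FP8 bytes is
; converted to two half-precision vectors.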
| |
| define { <vscale x 8 x half>, <vscale x 8 x half> } @f1cvt(<vscale x 16 x i8> %zm) { |
| ; CHECK-LABEL: f1cvt: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: f1cvt { z0.h, z1.h }, z0.b |
| ; CHECK-NEXT: ret |
| %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.fp8.cvt1.x2.nxv8f16(<vscale x 16 x i8> %zm) |
| ret { <vscale x 8 x half>, <vscale x 8 x half> } %res |
| } |
| |
| define { <vscale x 8 x half>, <vscale x 8 x half> } @f2cvt(<vscale x 16 x i8> %zm) { |
| ; CHECK-LABEL: f2cvt: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: f2cvt { z0.h, z1.h }, z0.b |
| ; CHECK-NEXT: ret |
| %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.fp8.cvt2.x2.nxv8f16(<vscale x 16 x i8> %zm) |
| ret { <vscale x 8 x half>, <vscale x 8 x half> } %res |
| } |
| |
| ; BF1CVT / BF2CVT |
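; The same widening conversions, producing BFloat16 results.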
| |
| define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @bf1cvt(<vscale x 16 x i8> %zm) { |
| ; CHECK-LABEL: bf1cvt: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: bf1cvt { z0.h, z1.h }, z0.b |
| ; CHECK-NEXT: ret |
| %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.fp8.cvt1.x2.nxv8bf16(<vscale x 16 x i8> %zm) |
| ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res |
| } |
| |
| define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @bf2cvt(<vscale x 16 x i8> %zm) { |
| ; CHECK-LABEL: bf2cvt: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: bf2cvt { z0.h, z1.h }, z0.b |
| ; CHECK-NEXT: ret |
| %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.fp8.cvt2.x2.nxv8bf16(<vscale x 16 x i8> %zm) |
| ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res |
| } |
| |
| ; F1CVTL / F2CVTL |
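; The F1CVTL/F2CVTL forms of the widening conversions, again expanding
; one FP8 vector into two half-precision vectors.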
| |
| define { <vscale x 8 x half>, <vscale x 8 x half> } @f1cvtl(<vscale x 16 x i8> %zm) { |
| ; CHECK-LABEL: f1cvtl: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: f1cvtl { z0.h, z1.h }, z0.b |
| ; CHECK-NEXT: ret |
| %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.fp8.cvtl1.x2.nxv8f16(<vscale x 16 x i8> %zm) |
| ret { <vscale x 8 x half>, <vscale x 8 x half> } %res |
| } |
| |
| define { <vscale x 8 x half>, <vscale x 8 x half> } @f2cvtl(<vscale x 16 x i8> %zm) { |
| ; CHECK-LABEL: f2cvtl: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: f2cvtl { z0.h, z1.h }, z0.b |
| ; CHECK-NEXT: ret |
  %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.fp8.cvtl2.x2.nxv8f16(<vscale x 16 x i8> %zm)
| ret { <vscale x 8 x half>, <vscale x 8 x half> } %res |
| } |
| |
| ; BF1CVTL / BF2CVTL |
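; As above, producing BFloat16 results.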
| |
| define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @bf1cvtl(<vscale x 16 x i8> %zm) { |
| ; CHECK-LABEL: bf1cvtl: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: bf1cvtl { z0.h, z1.h }, z0.b |
| ; CHECK-NEXT: ret |
| %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.fp8.cvtl1.x2.nxv8bf16(<vscale x 16 x i8> %zm) |
| ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res |
| } |
| |
define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @bf2cvtl(<vscale x 16 x i8> %zm) {
| ; CHECK-LABEL: bf2cvtl: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: bf2cvtl { z0.h, z1.h }, z0.b |
| ; CHECK-NEXT: ret |
| %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.fp8.cvtl2.x2.nxv8bf16(<vscale x 16 x i8> %zm) |
| ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res |
| } |