| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -enable-subreg-liveness -verify-machineinstrs < %s | FileCheck %s |
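
; This file checks lowering of the SME2 multi-vector saturating rounding
; shift-right narrow intrinsics (S/UQRSHR, S/UQRSHRN, SQRSHRU and SQRSHRUN)
; to their two- and four-vector instruction forms.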
| |
| ; |
| ; S/UQRSHR x2 |
| ; |
| |
| define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x2_s16(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) { |
| ; CHECK-LABEL: multi_vector_sat_shift_narrow_x2_s16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mov z3.d, z2.d |
| ; CHECK-NEXT: mov z2.d, z1.d |
| ; CHECK-NEXT: sqrshr z0.h, { z2.s, z3.s }, #16 |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x2.nxv8i16(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, i32 16) |
| ret <vscale x 8 x i16> %res |
| } |
| |
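; With -enable-subreg-liveness, the values returned by the strided ld1w loads
; should be allocated straight into the consecutive Z-register pairs consumed
; by sqrshr, with no copies between the loads and the shifts.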
| define { <vscale x 8 x i16>, <vscale x 8 x i16> } @multi_vector_sat_shift_narrow_x2_s16_tuple(i64 %stride, ptr %ptr) { |
| ; CHECK-LABEL: multi_vector_sat_shift_narrow_x2_s16_tuple: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill |
| ; CHECK-NEXT: addvl sp, sp, #-3 |
| ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill |
| ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG |
| ; CHECK-NEXT: .cfi_offset w29, -16 |
| ; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG |
| ; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG |
| ; CHECK-NEXT: ptrue pn8.b |
| ; CHECK-NEXT: add x8, x1, x0 |
| ; CHECK-NEXT: ld1w { z2.s, z10.s }, pn8/z, [x1] |
| ; CHECK-NEXT: ld1w { z3.s, z11.s }, pn8/z, [x8] |
| ; CHECK-NEXT: sqrshr z0.h, { z2.s, z3.s }, #16 |
| ; CHECK-NEXT: sqrshr z1.h, { z10.s, z11.s }, #16 |
| ; CHECK-NEXT: ldr z11, [sp, #1, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z10, [sp, #2, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload |
| ; CHECK-NEXT: addvl sp, sp, #3 |
| ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() |
  %1 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x2.nxv4i32(target("aarch64.svcount") %0, ptr %ptr)
| %2 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %1, 0 |
| %3 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %1, 1 |
| %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride |
  %4 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x2.nxv4i32(target("aarch64.svcount") %0, ptr %arrayidx2)
| %5 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %4, 0 |
| %6 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %4, 1 |
| %res1 = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x2.nxv8i16(<vscale x 4 x i32> %2, <vscale x 4 x i32> %5, i32 16) |
| %res2 = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x2.nxv8i16(<vscale x 4 x i32> %3, <vscale x 4 x i32> %6, i32 16) |
| %ins1 = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> %res1, 0 |
| %ins2 = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %ins1, <vscale x 8 x i16> %res2, 1 |
| ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %ins2 |
| } |
| |
| define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x2_u16(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) { |
| ; CHECK-LABEL: multi_vector_sat_shift_narrow_x2_u16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mov z3.d, z2.d |
| ; CHECK-NEXT: mov z2.d, z1.d |
| ; CHECK-NEXT: uqrshr z0.h, { z2.s, z3.s }, #16 |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x2.nxv8i16(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, i32 16) |
| ret <vscale x 8 x i16> %res |
| } |
| |
| ; |
| ; S/UQRSHR x4 |
| ; |
| |
| define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_x4_s8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) { |
| ; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_s8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mov z7.d, z4.d |
| ; CHECK-NEXT: mov z6.d, z3.d |
| ; CHECK-NEXT: mov z5.d, z2.d |
| ; CHECK-NEXT: mov z4.d, z1.d |
| ; CHECK-NEXT: sqrshr z0.b, { z4.s - z7.s }, #32 |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshr.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32) |
| ret <vscale x 16 x i8> %res |
| } |
| |
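; As above, but with four-vector strided loads: each sqrshr should consume a
; consecutive group of four Z registers formed directly from the ld1w results.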
| define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @multi_vector_sat_shift_narrow_x4_s8_tuple(i64 %stride, ptr %ptr) { |
| ; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_s8_tuple: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill |
| ; CHECK-NEXT: addvl sp, sp, #-9 |
| ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill |
| ; CHECK-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill |
| ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG |
| ; CHECK-NEXT: .cfi_offset w29, -16 |
| ; CHECK-NEXT: lsl x8, x0, #1 |
| ; CHECK-NEXT: add x9, x1, x0 |
| ; CHECK-NEXT: ptrue pn8.b |
| ; CHECK-NEXT: ld1w { z16.s, z20.s, z24.s, z28.s }, pn8/z, [x1] |
| ; CHECK-NEXT: ld1w { z17.s, z21.s, z25.s, z29.s }, pn8/z, [x9] |
| ; CHECK-NEXT: add x10, x1, x8 |
| ; CHECK-NEXT: add x8, x9, x8 |
| ; CHECK-NEXT: ld1w { z18.s, z22.s, z26.s, z30.s }, pn8/z, [x10] |
| ; CHECK-NEXT: ld1w { z19.s, z23.s, z27.s, z31.s }, pn8/z, [x8] |
| ; CHECK-NEXT: sqrshr z0.b, { z16.s - z19.s }, #32 |
| ; CHECK-NEXT: sqrshr z1.b, { z20.s - z23.s }, #32 |
| ; CHECK-NEXT: sqrshr z2.b, { z24.s - z27.s }, #32 |
| ; CHECK-NEXT: sqrshr z3.b, { z28.s - z31.s }, #32 |
| ; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload |
| ; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload |
| ; CHECK-NEXT: addvl sp, sp, #9 |
| ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() |
| %1 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x4.nxv4i32(target("aarch64.svcount") %0, ptr %ptr) |
| %2 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %1, 0 |
| %3 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %1, 1 |
| %4 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %1, 2 |
| %5 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %1, 3 |
| %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride |
| %6 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x4.nxv4i32(target("aarch64.svcount") %0, ptr %arrayidx2) |
| %7 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %6, 0 |
| %8 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %6, 1 |
| %9 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %6, 2 |
| %10 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %6, 3 |
| %mul3 = shl i64 %stride, 1 |
| %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3 |
| %11 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x4.nxv4i32(target("aarch64.svcount") %0, ptr %arrayidx4) |
| %12 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %11, 0 |
| %13 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %11, 1 |
| %14 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %11, 2 |
| %15 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %11, 3 |
| %mul5 = mul i64 %stride, 3 |
| %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5 |
| %16 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x4.nxv4i32(target("aarch64.svcount") %0, ptr %arrayidx6) |
| %17 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %16, 0 |
| %18 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %16, 1 |
| %19 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %16, 2 |
| %20 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %16, 3 |
| %res1 = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshr.x4.nxv16i8(<vscale x 4 x i32> %2, <vscale x 4 x i32> %7, <vscale x 4 x i32> %12, <vscale x 4 x i32> %17, i32 32) |
| %res2 = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshr.x4.nxv16i8(<vscale x 4 x i32> %3, <vscale x 4 x i32> %8, <vscale x 4 x i32> %13, <vscale x 4 x i32> %18, i32 32) |
| %res3 = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshr.x4.nxv16i8(<vscale x 4 x i32> %4, <vscale x 4 x i32> %9, <vscale x 4 x i32> %14, <vscale x 4 x i32> %19, i32 32) |
| %res4 = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshr.x4.nxv16i8(<vscale x 4 x i32> %5, <vscale x 4 x i32> %10, <vscale x 4 x i32> %15, <vscale x 4 x i32> %20, i32 32) |
| %ins1 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> %res1, 0 |
| %ins2 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %ins1, <vscale x 16 x i8> %res2, 1 |
| %ins3 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %ins2, <vscale x 16 x i8> %res3, 2 |
| %ins4 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %ins3, <vscale x 16 x i8> %res4, 3 |
| ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %ins4 |
| } |
| |
| define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x4_s16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) { |
| ; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_s16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mov z7.d, z4.d |
| ; CHECK-NEXT: mov z6.d, z3.d |
| ; CHECK-NEXT: mov z5.d, z2.d |
| ; CHECK-NEXT: mov z4.d, z1.d |
| ; CHECK-NEXT: sqrshr z0.h, { z4.d - z7.d }, #64 |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64) |
| ret <vscale x 8 x i16> %res |
| } |
| |
| define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_x4_u8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) { |
| ; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_u8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mov z7.d, z4.d |
| ; CHECK-NEXT: mov z6.d, z3.d |
| ; CHECK-NEXT: mov z5.d, z2.d |
| ; CHECK-NEXT: mov z4.d, z1.d |
| ; CHECK-NEXT: uqrshr z0.b, { z4.s - z7.s }, #32 |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 16 x i8> @llvm.aarch64.sve.uqrshr.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32) |
| ret <vscale x 16 x i8> %res |
| } |
| |
| define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) { |
| ; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_u16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mov z7.d, z4.d |
| ; CHECK-NEXT: mov z6.d, z3.d |
| ; CHECK-NEXT: mov z5.d, z2.d |
| ; CHECK-NEXT: mov z4.d, z1.d |
| ; CHECK-NEXT: uqrshr z0.h, { z4.d - z7.d }, #64 |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64) |
| ret <vscale x 8 x i16> %res |
| } |
| |
;
; S/UQRSHRN x4
;
| |
| define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_interleave_x4_s8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) { |
| ; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_s8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mov z7.d, z4.d |
| ; CHECK-NEXT: mov z6.d, z3.d |
| ; CHECK-NEXT: mov z5.d, z2.d |
| ; CHECK-NEXT: mov z4.d, z1.d |
| ; CHECK-NEXT: sqrshrn z0.b, { z4.s - z7.s }, #32 |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrn.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32) |
| ret <vscale x 16 x i8> %res |
| } |
| |
| define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_interleave_x4_s16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) { |
| ; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_s16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mov z7.d, z4.d |
| ; CHECK-NEXT: mov z6.d, z3.d |
| ; CHECK-NEXT: mov z5.d, z2.d |
| ; CHECK-NEXT: mov z4.d, z1.d |
| ; CHECK-NEXT: sqrshrn z0.h, { z4.d - z7.d }, #64 |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrn.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64) |
| ret <vscale x 8 x i16> %res |
| } |
| |
define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_interleave_x4_u8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
| ; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_u8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mov z7.d, z4.d |
| ; CHECK-NEXT: mov z6.d, z3.d |
| ; CHECK-NEXT: mov z5.d, z2.d |
| ; CHECK-NEXT: mov z4.d, z1.d |
| ; CHECK-NEXT: uqrshrn z0.b, { z4.s - z7.s }, #32 |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrn.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32) |
| ret <vscale x 16 x i8> %res |
| } |
| |
| define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_interleave_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) { |
| ; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_u16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mov z7.d, z4.d |
| ; CHECK-NEXT: mov z6.d, z3.d |
| ; CHECK-NEXT: mov z5.d, z2.d |
| ; CHECK-NEXT: mov z4.d, z1.d |
| ; CHECK-NEXT: uqrshrn z0.h, { z4.d - z7.d }, #64 |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrn.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64) |
| ret <vscale x 8 x i16> %res |
| } |
| |
;
; SQRSHRU x2
;
| |
| define <vscale x 8 x i16> @multi_vector_sat_shift_unsigned_narrow_x2_u16(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) { |
| ; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_x2_u16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mov z3.d, z2.d |
| ; CHECK-NEXT: mov z2.d, z1.d |
| ; CHECK-NEXT: sqrshru z0.h, { z2.s, z3.s }, #16 |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x2.nxv8i16(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, i32 16) |
| ret <vscale x 8 x i16> %res |
| } |
| |
;
; SQRSHRU x4
;
| |
| define <vscale x 16 x i8> @multi_vector_sat_shift_unsigned_narrow_x4_u8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) { |
| ; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_x4_u8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mov z7.d, z4.d |
| ; CHECK-NEXT: mov z6.d, z3.d |
| ; CHECK-NEXT: mov z5.d, z2.d |
| ; CHECK-NEXT: mov z4.d, z1.d |
| ; CHECK-NEXT: sqrshru z0.b, { z4.s - z7.s }, #32 |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshru.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32) |
| ret <vscale x 16 x i8> %res |
| } |
| |
| define <vscale x 8 x i16> @multi_vector_sat_shift_unsigned_narrow_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) { |
| ; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_x4_u16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mov z7.d, z4.d |
| ; CHECK-NEXT: mov z6.d, z3.d |
| ; CHECK-NEXT: mov z5.d, z2.d |
| ; CHECK-NEXT: mov z4.d, z1.d |
| ; CHECK-NEXT: sqrshru z0.h, { z4.d - z7.d }, #64 |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64) |
| ret <vscale x 8 x i16> %res |
| } |
| |
;
; SQRSHRUN x4
;
| |
| define <vscale x 16 x i8> @multi_vector_sat_shift_unsigned_narrow_interleave_x4_u8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) { |
| ; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_interleave_x4_u8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mov z7.d, z4.d |
| ; CHECK-NEXT: mov z6.d, z3.d |
| ; CHECK-NEXT: mov z5.d, z2.d |
| ; CHECK-NEXT: mov z4.d, z1.d |
| ; CHECK-NEXT: sqrshrun z0.b, { z4.s - z7.s }, #32 |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrun.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32) |
| ret <vscale x 16 x i8> %res |
| } |
| |
| define <vscale x 8 x i16> @multi_vector_sat_shift_unsigned_narrow_interleave_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) { |
| ; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_interleave_x4_u16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: mov z7.d, z4.d |
| ; CHECK-NEXT: mov z6.d, z3.d |
| ; CHECK-NEXT: mov z5.d, z2.d |
| ; CHECK-NEXT: mov z4.d, z1.d |
| ; CHECK-NEXT: sqrshrun z0.h, { z4.d - z7.d }, #64 |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrun.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64) |
| ret <vscale x 8 x i16> %res |
| } |
| |
| declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x2.nxv8i16(<vscale x 4 x i32>, <vscale x 4 x i32>, i32) |
| |
| declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshr.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32) |
| declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32) |
| |
| declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x2.nxv8i16(<vscale x 4 x i32>, <vscale x 4 x i32>, i32) |
| |
| declare <vscale x 16 x i8> @llvm.aarch64.sve.uqrshr.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32) |
| declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32) |
| |
| declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrn.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32) |
| declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrn.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32) |
| |
| declare <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrn.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32) |
| declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrn.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32) |
| |
| declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x2.nxv8i16(<vscale x 4 x i32>, <vscale x 4 x i32>, i32) |
| |
| declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshru.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32) |
| declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32) |
| |
| declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrun.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32) |
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrun.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)

declare target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x2.nxv4i32(target("aarch64.svcount"), ptr)
declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x4.nxv4i32(target("aarch64.svcount"), ptr)