; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s
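; Check lowering of the llvm.loop.dependence.war.mask and
; llvm.loop.dependence.raw.mask intrinsics to the SVE2 WHILEWR and WHILERW
; instructions. When the element size matches the predicate lane width
; (1, 2, 4, or 8 bytes for 16, 8, 4, or 2 lanes), the WAR mask lowers to a
; single WHILEWR.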
define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_8:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: whilewr p0.b, x0, x1
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 1)
ret <vscale x 16 x i1> %0
}
define <vscale x 8 x i1> @whilewr_16(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_16:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: whilewr p0.h, x0, x1
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr %a, ptr %b, i64 2)
ret <vscale x 8 x i1> %0
}
define <vscale x 4 x i1> @whilewr_32(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_32:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: whilewr p0.s, x0, x1
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 4 x i1> @llvm.loop.dependence.war.mask.nxv4i1(ptr %a, ptr %b, i64 4)
ret <vscale x 4 x i1> %0
}
define <vscale x 2 x i1> @whilewr_64(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_64:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: whilewr p0.d, x0, x1
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 2 x i1> @llvm.loop.dependence.war.mask.nxv2i1(ptr %a, ptr %b, i64 8)
ret <vscale x 2 x i1> %0
}
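; The RAW (read-after-write) masks lower to WHILERW with the same
; element-size to lane-width mapping.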
define <vscale x 16 x i1> @whilerw_8(ptr %a, ptr %b) {
; CHECK-LABEL: whilerw_8:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: whilerw p0.b, x0, x1
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 16 x i1> @llvm.loop.dependence.raw.mask.nxv16i1(ptr %a, ptr %b, i64 1)
ret <vscale x 16 x i1> %0
}
define <vscale x 8 x i1> @whilerw_16(ptr %a, ptr %b) {
; CHECK-LABEL: whilerw_16:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: whilerw p0.h, x0, x1
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 8 x i1> @llvm.loop.dependence.raw.mask.nxv8i1(ptr %a, ptr %b, i64 2)
ret <vscale x 8 x i1> %0
}
define <vscale x 4 x i1> @whilerw_32(ptr %a, ptr %b) {
; CHECK-LABEL: whilerw_32:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: whilerw p0.s, x0, x1
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 4 x i1> @llvm.loop.dependence.raw.mask.nxv4i1(ptr %a, ptr %b, i64 4)
ret <vscale x 4 x i1> %0
}
define <vscale x 2 x i1> @whilerw_64(ptr %a, ptr %b) {
; CHECK-LABEL: whilerw_64:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: whilerw p0.d, x0, x1
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 2 x i1> @llvm.loop.dependence.raw.mask.nxv2i1(ptr %a, ptr %b, i64 8)
ret <vscale x 2 x i1> %0
}
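; Masks wider than one predicate register are split into one WHILEWR per
; part, advancing the base pointer by a whole vector of elements between
; parts.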
define <vscale x 32 x i1> @whilewr_8_split(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_8_split:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: whilewr p0.b, x0, x1
; CHECK-NEXT: incb x0
; CHECK-NEXT: whilewr p1.b, x0, x1
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 32 x i1> @llvm.loop.dependence.war.mask.nxv32i1(ptr %a, ptr %b, i64 1)
ret <vscale x 32 x i1> %0
}
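; A four-part result forms each part's base pointer with incb/addvl.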
define <vscale x 64 x i1> @whilewr_8_split2(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_8_split2:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov x8, x0
; CHECK-NEXT: whilewr p0.b, x0, x1
; CHECK-NEXT: addvl x9, x0, #3
; CHECK-NEXT: incb x0, all, mul #2
; CHECK-NEXT: incb x8
; CHECK-NEXT: whilewr p3.b, x9, x1
; CHECK-NEXT: whilewr p2.b, x0, x1
; CHECK-NEXT: whilewr p1.b, x8, x1
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 64 x i1> @llvm.loop.dependence.war.mask.nxv64i1(ptr %a, ptr %b, i64 1)
ret <vscale x 64 x i1> %0
}
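; When the element size does not match the lane width (here 2-byte elements
; with a 16-lane mask) there is no matching WHILEWR encoding, so the mask is
; expanded: the pointer difference is divided by the element size and
; compared against an index vector, and an all-true mask is selected when the
; difference is less than one element.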
define <vscale x 16 x i1> @whilewr_16_expand(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_16_expand:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: index z0.d, #0, #1
; CHECK-NEXT: sub x8, x1, x0
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: add x8, x8, x8, lsr #63
; CHECK-NEXT: asr x8, x8, #1
; CHECK-NEXT: mov z1.d, z0.d
; CHECK-NEXT: mov z4.d, z0.d
; CHECK-NEXT: mov z5.d, z0.d
; CHECK-NEXT: mov z2.d, x8
; CHECK-NEXT: incd z1.d
; CHECK-NEXT: incd z4.d, all, mul #2
; CHECK-NEXT: incd z5.d, all, mul #4
; CHECK-NEXT: cmphi p2.d, p0/z, z2.d, z0.d
; CHECK-NEXT: mov z3.d, z1.d
; CHECK-NEXT: cmphi p1.d, p0/z, z2.d, z1.d
; CHECK-NEXT: incd z1.d, all, mul #4
; CHECK-NEXT: cmphi p3.d, p0/z, z2.d, z4.d
; CHECK-NEXT: incd z4.d, all, mul #4
; CHECK-NEXT: cmphi p4.d, p0/z, z2.d, z5.d
; CHECK-NEXT: incd z3.d, all, mul #2
; CHECK-NEXT: cmphi p5.d, p0/z, z2.d, z1.d
; CHECK-NEXT: cmphi p7.d, p0/z, z2.d, z4.d
; CHECK-NEXT: uzp1 p1.s, p2.s, p1.s
; CHECK-NEXT: mov z0.d, z3.d
; CHECK-NEXT: cmphi p6.d, p0/z, z2.d, z3.d
; CHECK-NEXT: uzp1 p2.s, p4.s, p5.s
; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: incd z0.d, all, mul #4
; CHECK-NEXT: uzp1 p3.s, p3.s, p6.s
; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: cmphi p0.d, p0/z, z2.d, z0.d
; CHECK-NEXT: uzp1 p1.h, p1.h, p3.h
; CHECK-NEXT: cmp x8, #1
; CHECK-NEXT: cset w8, lt
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: uzp1 p0.s, p7.s, p0.s
; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p0.h, p2.h, p0.h
; CHECK-NEXT: uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT: whilelo p1.b, xzr, x8
; CHECK-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 2)
ret <vscale x 16 x i1> %0
}
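; A wide mask that also needs expansion repeats the compare sequence with a
; second difference taken from an advanced base pointer.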
define <vscale x 32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_16_expand2:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p9, [sp, #2, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: index z0.d, #0, #1
; CHECK-NEXT: sub x8, x1, x0
; CHECK-NEXT: incb x0, all, mul #2
; CHECK-NEXT: add x8, x8, x8, lsr #63
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: asr x8, x8, #1
; CHECK-NEXT: sub x9, x1, x0
; CHECK-NEXT: mov z1.d, z0.d
; CHECK-NEXT: mov z2.d, z0.d
; CHECK-NEXT: mov z3.d, z0.d
; CHECK-NEXT: mov z5.d, x8
; CHECK-NEXT: add x9, x9, x9, lsr #63
; CHECK-NEXT: incd z1.d
; CHECK-NEXT: incd z2.d, all, mul #2
; CHECK-NEXT: incd z3.d, all, mul #4
; CHECK-NEXT: cmphi p2.d, p0/z, z5.d, z0.d
; CHECK-NEXT: asr x9, x9, #1
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: mov z6.d, z1.d
; CHECK-NEXT: mov z7.d, z2.d
; CHECK-NEXT: cmphi p1.d, p0/z, z5.d, z1.d
; CHECK-NEXT: cmphi p3.d, p0/z, z5.d, z3.d
; CHECK-NEXT: cmphi p5.d, p0/z, z5.d, z2.d
; CHECK-NEXT: incd z4.d, all, mul #2
; CHECK-NEXT: incd z6.d, all, mul #4
; CHECK-NEXT: incd z7.d, all, mul #4
; CHECK-NEXT: uzp1 p1.s, p2.s, p1.s
; CHECK-NEXT: mov z24.d, z4.d
; CHECK-NEXT: cmphi p4.d, p0/z, z5.d, z6.d
; CHECK-NEXT: cmphi p6.d, p0/z, z5.d, z4.d
; CHECK-NEXT: cmphi p7.d, p0/z, z5.d, z7.d
; CHECK-NEXT: incd z24.d, all, mul #4
; CHECK-NEXT: uzp1 p2.s, p3.s, p4.s
; CHECK-NEXT: uzp1 p3.s, p5.s, p6.s
; CHECK-NEXT: cmphi p8.d, p0/z, z5.d, z24.d
; CHECK-NEXT: mov z5.d, x9
; CHECK-NEXT: cmp x8, #1
; CHECK-NEXT: uzp1 p1.h, p1.h, p3.h
; CHECK-NEXT: cset w8, lt
; CHECK-NEXT: cmphi p4.d, p0/z, z5.d, z24.d
; CHECK-NEXT: cmphi p5.d, p0/z, z5.d, z7.d
; CHECK-NEXT: cmphi p6.d, p0/z, z5.d, z6.d
; CHECK-NEXT: uzp1 p7.s, p7.s, p8.s
; CHECK-NEXT: cmphi p9.d, p0/z, z5.d, z3.d
; CHECK-NEXT: cmphi p3.d, p0/z, z5.d, z4.d
; CHECK-NEXT: cmphi p8.d, p0/z, z5.d, z2.d
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: uzp1 p2.h, p2.h, p7.h
; CHECK-NEXT: cmphi p7.d, p0/z, z5.d, z1.d
; CHECK-NEXT: cmphi p0.d, p0/z, z5.d, z0.d
; CHECK-NEXT: uzp1 p4.s, p5.s, p4.s
; CHECK-NEXT: uzp1 p5.s, p9.s, p6.s
; CHECK-NEXT: ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: whilelo p6.b, xzr, x8
; CHECK-NEXT: uzp1 p3.s, p8.s, p3.s
; CHECK-NEXT: cmp x9, #1
; CHECK-NEXT: ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p0.s, p0.s, p7.s
; CHECK-NEXT: cset w8, lt
; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p4.h, p5.h, p4.h
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p0.h, p0.h, p3.h
; CHECK-NEXT: uzp1 p1.b, p1.b, p2.b
; CHECK-NEXT: uzp1 p2.b, p0.b, p4.b
; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: whilelo p3.b, xzr, x8
; CHECK-NEXT: sel p0.b, p1, p1.b, p6.b
; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: sel p1.b, p2, p2.b, p3.b
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 32 x i1> @llvm.loop.dependence.war.mask.nxv32i1(ptr %a, ptr %b, i64 2)
ret <vscale x 32 x i1> %0
}
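; With 4-byte elements the difference is divided by 4, rounding toward zero
; (add #3 and csel on the sign before the arithmetic shift).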
define <vscale x 8 x i1> @whilewr_32_expand(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_32_expand:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: index z0.d, #0, #1
; CHECK-NEXT: subs x8, x1, x0
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: add x9, x8, #3
; CHECK-NEXT: csel x8, x9, x8, mi
; CHECK-NEXT: asr x8, x8, #2
; CHECK-NEXT: mov z1.d, z0.d
; CHECK-NEXT: mov z2.d, z0.d
; CHECK-NEXT: mov z3.d, x8
; CHECK-NEXT: incd z1.d
; CHECK-NEXT: incd z2.d, all, mul #2
; CHECK-NEXT: cmphi p1.d, p0/z, z3.d, z0.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: cmphi p2.d, p0/z, z3.d, z1.d
; CHECK-NEXT: cmphi p3.d, p0/z, z3.d, z2.d
; CHECK-NEXT: incd z4.d, all, mul #2
; CHECK-NEXT: uzp1 p1.s, p1.s, p2.s
; CHECK-NEXT: cmphi p0.d, p0/z, z3.d, z4.d
; CHECK-NEXT: cmp x8, #1
; CHECK-NEXT: cset w8, lt
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: uzp1 p0.s, p3.s, p0.s
; CHECK-NEXT: uzp1 p0.h, p1.h, p0.h
; CHECK-NEXT: whilelo p1.h, xzr, x8
; CHECK-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr %a, ptr %b, i64 4)
ret <vscale x 8 x i1> %0
}
define <vscale x 16 x i1> @whilewr_32_expand2(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_32_expand2:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: index z0.d, #0, #1
; CHECK-NEXT: subs x8, x1, x0
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: add x9, x8, #3
; CHECK-NEXT: csel x8, x9, x8, mi
; CHECK-NEXT: asr x8, x8, #2
; CHECK-NEXT: mov z1.d, z0.d
; CHECK-NEXT: mov z4.d, z0.d
; CHECK-NEXT: mov z5.d, z0.d
; CHECK-NEXT: mov z2.d, x8
; CHECK-NEXT: incd z1.d
; CHECK-NEXT: incd z4.d, all, mul #2
; CHECK-NEXT: incd z5.d, all, mul #4
; CHECK-NEXT: cmphi p2.d, p0/z, z2.d, z0.d
; CHECK-NEXT: mov z3.d, z1.d
; CHECK-NEXT: cmphi p1.d, p0/z, z2.d, z1.d
; CHECK-NEXT: incd z1.d, all, mul #4
; CHECK-NEXT: cmphi p3.d, p0/z, z2.d, z4.d
; CHECK-NEXT: incd z4.d, all, mul #4
; CHECK-NEXT: cmphi p4.d, p0/z, z2.d, z5.d
; CHECK-NEXT: incd z3.d, all, mul #2
; CHECK-NEXT: cmphi p5.d, p0/z, z2.d, z1.d
; CHECK-NEXT: cmphi p7.d, p0/z, z2.d, z4.d
; CHECK-NEXT: uzp1 p1.s, p2.s, p1.s
; CHECK-NEXT: mov z0.d, z3.d
; CHECK-NEXT: cmphi p6.d, p0/z, z2.d, z3.d
; CHECK-NEXT: uzp1 p2.s, p4.s, p5.s
; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: incd z0.d, all, mul #4
; CHECK-NEXT: uzp1 p3.s, p3.s, p6.s
; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: cmphi p0.d, p0/z, z2.d, z0.d
; CHECK-NEXT: uzp1 p1.h, p1.h, p3.h
; CHECK-NEXT: cmp x8, #1
; CHECK-NEXT: cset w8, lt
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: uzp1 p0.s, p7.s, p0.s
; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p0.h, p2.h, p0.h
; CHECK-NEXT: uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT: whilelo p1.b, xzr, x8
; CHECK-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 4)
ret <vscale x 16 x i1> %0
}
define <vscale x 32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_32_expand3:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p10, [sp, #1, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p9, [sp, #2, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: index z0.d, #0, #1
; CHECK-NEXT: subs x8, x1, x0
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: add x9, x8, #3
; CHECK-NEXT: incb x0, all, mul #4
; CHECK-NEXT: csel x8, x9, x8, mi
; CHECK-NEXT: asr x8, x8, #2
; CHECK-NEXT: mov z1.d, z0.d
; CHECK-NEXT: mov z2.d, z0.d
; CHECK-NEXT: mov z4.d, z0.d
; CHECK-NEXT: mov z5.d, x8
; CHECK-NEXT: incd z1.d
; CHECK-NEXT: incd z2.d, all, mul #2
; CHECK-NEXT: incd z4.d, all, mul #4
; CHECK-NEXT: cmphi p5.d, p0/z, z5.d, z0.d
; CHECK-NEXT: mov z3.d, z1.d
; CHECK-NEXT: mov z6.d, z2.d
; CHECK-NEXT: mov z7.d, z1.d
; CHECK-NEXT: cmphi p2.d, p0/z, z5.d, z4.d
; CHECK-NEXT: cmphi p3.d, p0/z, z5.d, z2.d
; CHECK-NEXT: cmphi p4.d, p0/z, z5.d, z1.d
; CHECK-NEXT: incd z3.d, all, mul #2
; CHECK-NEXT: incd z6.d, all, mul #4
; CHECK-NEXT: incd z7.d, all, mul #4
; CHECK-NEXT: uzp1 p4.s, p5.s, p4.s
; CHECK-NEXT: mov z24.d, z3.d
; CHECK-NEXT: cmphi p6.d, p0/z, z5.d, z6.d
; CHECK-NEXT: cmphi p7.d, p0/z, z5.d, z7.d
; CHECK-NEXT: cmphi p8.d, p0/z, z5.d, z3.d
; CHECK-NEXT: incd z24.d, all, mul #4
; CHECK-NEXT: uzp1 p2.s, p2.s, p7.s
; CHECK-NEXT: uzp1 p3.s, p3.s, p8.s
; CHECK-NEXT: cmphi p9.d, p0/z, z5.d, z24.d
; CHECK-NEXT: cmp x8, #1
; CHECK-NEXT: uzp1 p3.h, p4.h, p3.h
; CHECK-NEXT: cset w8, lt
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: uzp1 p6.s, p6.s, p9.s
; CHECK-NEXT: whilelo p1.b, xzr, x8
; CHECK-NEXT: subs x8, x1, x0
; CHECK-NEXT: uzp1 p2.h, p2.h, p6.h
; CHECK-NEXT: add x9, x8, #3
; CHECK-NEXT: csel x8, x9, x8, mi
; CHECK-NEXT: uzp1 p2.b, p3.b, p2.b
; CHECK-NEXT: asr x8, x8, #2
; CHECK-NEXT: mov z5.d, x8
; CHECK-NEXT: cmphi p5.d, p0/z, z5.d, z24.d
; CHECK-NEXT: cmphi p7.d, p0/z, z5.d, z6.d
; CHECK-NEXT: cmphi p8.d, p0/z, z5.d, z7.d
; CHECK-NEXT: cmphi p9.d, p0/z, z5.d, z4.d
; CHECK-NEXT: cmphi p4.d, p0/z, z5.d, z3.d
; CHECK-NEXT: cmphi p10.d, p0/z, z5.d, z2.d
; CHECK-NEXT: cmphi p6.d, p0/z, z5.d, z1.d
; CHECK-NEXT: cmphi p0.d, p0/z, z5.d, z0.d
; CHECK-NEXT: cmp x8, #1
; CHECK-NEXT: uzp1 p5.s, p7.s, p5.s
; CHECK-NEXT: cset w8, lt
; CHECK-NEXT: uzp1 p7.s, p9.s, p8.s
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p4.s, p10.s, p4.s
; CHECK-NEXT: ldr p10, [sp, #1, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p0.s, p0.s, p6.s
; CHECK-NEXT: ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p5.h, p7.h, p5.h
; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p0.h, p0.h, p4.h
; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: whilelo p4.b, xzr, x8
; CHECK-NEXT: uzp1 p3.b, p0.b, p5.b
; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: sel p0.b, p2, p2.b, p1.b
; CHECK-NEXT: sel p1.b, p3, p3.b, p4.b
; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 32 x i1> @llvm.loop.dependence.war.mask.nxv32i1(ptr %a, ptr %b, i64 4)
ret <vscale x 32 x i1> %0
}
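; 8-byte elements follow the same pattern, dividing the difference by 8.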
define <vscale x 4 x i1> @whilewr_64_expand(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_64_expand:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: index z0.d, #0, #1
; CHECK-NEXT: subs x8, x1, x0
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: add x9, x8, #7
; CHECK-NEXT: csel x8, x9, x8, mi
; CHECK-NEXT: asr x8, x8, #3
; CHECK-NEXT: mov z1.d, z0.d
; CHECK-NEXT: mov z2.d, x8
; CHECK-NEXT: incd z1.d
; CHECK-NEXT: cmphi p1.d, p0/z, z2.d, z0.d
; CHECK-NEXT: cmphi p0.d, p0/z, z2.d, z1.d
; CHECK-NEXT: cmp x8, #1
; CHECK-NEXT: cset w8, lt
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: uzp1 p0.s, p1.s, p0.s
; CHECK-NEXT: whilelo p1.s, xzr, x8
; CHECK-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 4 x i1> @llvm.loop.dependence.war.mask.nxv4i1(ptr %a, ptr %b, i64 8)
ret <vscale x 4 x i1> %0
}
define <vscale x 8 x i1> @whilewr_64_expand2(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_64_expand2:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: index z0.d, #0, #1
; CHECK-NEXT: subs x8, x1, x0
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: add x9, x8, #7
; CHECK-NEXT: csel x8, x9, x8, mi
; CHECK-NEXT: asr x8, x8, #3
; CHECK-NEXT: mov z1.d, z0.d
; CHECK-NEXT: mov z2.d, z0.d
; CHECK-NEXT: mov z3.d, x8
; CHECK-NEXT: incd z1.d
; CHECK-NEXT: incd z2.d, all, mul #2
; CHECK-NEXT: cmphi p1.d, p0/z, z3.d, z0.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: cmphi p2.d, p0/z, z3.d, z1.d
; CHECK-NEXT: cmphi p3.d, p0/z, z3.d, z2.d
; CHECK-NEXT: incd z4.d, all, mul #2
; CHECK-NEXT: uzp1 p1.s, p1.s, p2.s
; CHECK-NEXT: cmphi p0.d, p0/z, z3.d, z4.d
; CHECK-NEXT: cmp x8, #1
; CHECK-NEXT: cset w8, lt
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: uzp1 p0.s, p3.s, p0.s
; CHECK-NEXT: uzp1 p0.h, p1.h, p0.h
; CHECK-NEXT: whilelo p1.h, xzr, x8
; CHECK-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr %a, ptr %b, i64 8)
ret <vscale x 8 x i1> %0
}
define <vscale x 16 x i1> @whilewr_64_expand3(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_64_expand3:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: index z0.d, #0, #1
; CHECK-NEXT: subs x8, x1, x0
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: add x9, x8, #7
; CHECK-NEXT: csel x8, x9, x8, mi
; CHECK-NEXT: asr x8, x8, #3
; CHECK-NEXT: mov z1.d, z0.d
; CHECK-NEXT: mov z4.d, z0.d
; CHECK-NEXT: mov z5.d, z0.d
; CHECK-NEXT: mov z2.d, x8
; CHECK-NEXT: incd z1.d
; CHECK-NEXT: incd z4.d, all, mul #2
; CHECK-NEXT: incd z5.d, all, mul #4
; CHECK-NEXT: cmphi p2.d, p0/z, z2.d, z0.d
; CHECK-NEXT: mov z3.d, z1.d
; CHECK-NEXT: cmphi p1.d, p0/z, z2.d, z1.d
; CHECK-NEXT: incd z1.d, all, mul #4
; CHECK-NEXT: cmphi p3.d, p0/z, z2.d, z4.d
; CHECK-NEXT: incd z4.d, all, mul #4
; CHECK-NEXT: cmphi p4.d, p0/z, z2.d, z5.d
; CHECK-NEXT: incd z3.d, all, mul #2
; CHECK-NEXT: cmphi p5.d, p0/z, z2.d, z1.d
; CHECK-NEXT: cmphi p7.d, p0/z, z2.d, z4.d
; CHECK-NEXT: uzp1 p1.s, p2.s, p1.s
; CHECK-NEXT: mov z0.d, z3.d
; CHECK-NEXT: cmphi p6.d, p0/z, z2.d, z3.d
; CHECK-NEXT: uzp1 p2.s, p4.s, p5.s
; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: incd z0.d, all, mul #4
; CHECK-NEXT: uzp1 p3.s, p3.s, p6.s
; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: cmphi p0.d, p0/z, z2.d, z0.d
; CHECK-NEXT: uzp1 p1.h, p1.h, p3.h
; CHECK-NEXT: cmp x8, #1
; CHECK-NEXT: cset w8, lt
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: uzp1 p0.s, p7.s, p0.s
; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p0.h, p2.h, p0.h
; CHECK-NEXT: uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT: whilelo p1.b, xzr, x8
; CHECK-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 8)
ret <vscale x 16 x i1> %0
}
define <vscale x 32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_64_expand4:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p10, [sp, #1, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p9, [sp, #2, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: index z0.d, #0, #1
; CHECK-NEXT: subs x8, x1, x0
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: add x9, x8, #7
; CHECK-NEXT: csel x8, x9, x8, mi
; CHECK-NEXT: addvl x9, x0, #8
; CHECK-NEXT: asr x8, x8, #3
; CHECK-NEXT: mov z1.d, z0.d
; CHECK-NEXT: mov z2.d, z0.d
; CHECK-NEXT: mov z4.d, z0.d
; CHECK-NEXT: mov z5.d, x8
; CHECK-NEXT: incd z1.d
; CHECK-NEXT: incd z2.d, all, mul #2
; CHECK-NEXT: incd z4.d, all, mul #4
; CHECK-NEXT: cmphi p5.d, p0/z, z5.d, z0.d
; CHECK-NEXT: mov z3.d, z1.d
; CHECK-NEXT: mov z6.d, z2.d
; CHECK-NEXT: mov z7.d, z1.d
; CHECK-NEXT: cmphi p2.d, p0/z, z5.d, z4.d
; CHECK-NEXT: cmphi p3.d, p0/z, z5.d, z2.d
; CHECK-NEXT: cmphi p4.d, p0/z, z5.d, z1.d
; CHECK-NEXT: incd z3.d, all, mul #2
; CHECK-NEXT: incd z6.d, all, mul #4
; CHECK-NEXT: incd z7.d, all, mul #4
; CHECK-NEXT: uzp1 p4.s, p5.s, p4.s
; CHECK-NEXT: mov z24.d, z3.d
; CHECK-NEXT: cmphi p6.d, p0/z, z5.d, z6.d
; CHECK-NEXT: cmphi p7.d, p0/z, z5.d, z7.d
; CHECK-NEXT: cmphi p8.d, p0/z, z5.d, z3.d
; CHECK-NEXT: incd z24.d, all, mul #4
; CHECK-NEXT: uzp1 p2.s, p2.s, p7.s
; CHECK-NEXT: uzp1 p3.s, p3.s, p8.s
; CHECK-NEXT: cmphi p9.d, p0/z, z5.d, z24.d
; CHECK-NEXT: cmp x8, #1
; CHECK-NEXT: uzp1 p3.h, p4.h, p3.h
; CHECK-NEXT: cset w8, lt
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: uzp1 p6.s, p6.s, p9.s
; CHECK-NEXT: whilelo p1.b, xzr, x8
; CHECK-NEXT: subs x8, x1, x9
; CHECK-NEXT: uzp1 p2.h, p2.h, p6.h
; CHECK-NEXT: add x9, x8, #7
; CHECK-NEXT: csel x8, x9, x8, mi
; CHECK-NEXT: uzp1 p2.b, p3.b, p2.b
; CHECK-NEXT: asr x8, x8, #3
; CHECK-NEXT: mov z5.d, x8
; CHECK-NEXT: cmphi p5.d, p0/z, z5.d, z24.d
; CHECK-NEXT: cmphi p7.d, p0/z, z5.d, z6.d
; CHECK-NEXT: cmphi p8.d, p0/z, z5.d, z7.d
; CHECK-NEXT: cmphi p9.d, p0/z, z5.d, z4.d
; CHECK-NEXT: cmphi p4.d, p0/z, z5.d, z3.d
; CHECK-NEXT: cmphi p10.d, p0/z, z5.d, z2.d
; CHECK-NEXT: cmphi p6.d, p0/z, z5.d, z1.d
; CHECK-NEXT: cmphi p0.d, p0/z, z5.d, z0.d
; CHECK-NEXT: cmp x8, #1
; CHECK-NEXT: uzp1 p5.s, p7.s, p5.s
; CHECK-NEXT: cset w8, lt
; CHECK-NEXT: uzp1 p7.s, p9.s, p8.s
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p4.s, p10.s, p4.s
; CHECK-NEXT: ldr p10, [sp, #1, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p0.s, p0.s, p6.s
; CHECK-NEXT: ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p5.h, p7.h, p5.h
; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p0.h, p0.h, p4.h
; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: whilelo p4.b, xzr, x8
; CHECK-NEXT: uzp1 p3.b, p0.b, p5.b
; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: sel p0.b, p2, p2.b, p1.b
; CHECK-NEXT: sel p1.b, p3, p3.b, p4.b
; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 32 x i1> @llvm.loop.dependence.war.mask.nxv32i1(ptr %a, ptr %b, i64 8)
ret <vscale x 32 x i1> %0
}
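; Masks with non-power-of-two lane counts are widened to the next legal
; predicate type and still lower to a single WHILEWR.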
define <vscale x 9 x i1> @whilewr_8_widen(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_8_widen:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: whilewr p0.b, x0, x1
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 9 x i1> @llvm.loop.dependence.war.mask.nxv9i1(ptr %a, ptr %b, i64 1)
ret <vscale x 9 x i1> %0
}
define <vscale x 7 x i1> @whilewr_16_widen(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_16_widen:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: whilewr p0.h, x0, x1
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 7 x i1> @llvm.loop.dependence.war.mask.nxv7i1(ptr %a, ptr %b, i64 2)
ret <vscale x 7 x i1> %0
}
define <vscale x 3 x i1> @whilewr_32_widen(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_32_widen:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: whilewr p0.s, x0, x1
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 3 x i1> @llvm.loop.dependence.war.mask.nxv3i1(ptr %a, ptr %b, i64 4)
ret <vscale x 3 x i1> %0
}
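; An element size of 3 has no WHILEWR encoding, so the mask is expanded; the
; division of the pointer difference by 3 uses the usual multiply-high
; magic-constant sequence.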
define <vscale x 16 x i1> @whilewr_badimm(ptr %a, ptr %b) {
; CHECK-LABEL: whilewr_badimm:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: index z0.d, #0, #1
; CHECK-NEXT: mov x8, #6148914691236517205 // =0x5555555555555555
; CHECK-NEXT: sub x9, x1, x0
; CHECK-NEXT: movk x8, #21846
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: smulh x8, x9, x8
; CHECK-NEXT: mov z1.d, z0.d
; CHECK-NEXT: mov z4.d, z0.d
; CHECK-NEXT: mov z5.d, z0.d
; CHECK-NEXT: incd z1.d
; CHECK-NEXT: add x8, x8, x8, lsr #63
; CHECK-NEXT: incd z4.d, all, mul #2
; CHECK-NEXT: incd z5.d, all, mul #4
; CHECK-NEXT: mov z2.d, x8
; CHECK-NEXT: mov z3.d, z1.d
; CHECK-NEXT: cmphi p2.d, p0/z, z2.d, z0.d
; CHECK-NEXT: cmphi p1.d, p0/z, z2.d, z1.d
; CHECK-NEXT: incd z1.d, all, mul #4
; CHECK-NEXT: incd z3.d, all, mul #2
; CHECK-NEXT: cmphi p3.d, p0/z, z2.d, z4.d
; CHECK-NEXT: incd z4.d, all, mul #4
; CHECK-NEXT: cmphi p4.d, p0/z, z2.d, z5.d
; CHECK-NEXT: cmphi p5.d, p0/z, z2.d, z1.d
; CHECK-NEXT: mov z0.d, z3.d
; CHECK-NEXT: cmphi p6.d, p0/z, z2.d, z3.d
; CHECK-NEXT: cmphi p7.d, p0/z, z2.d, z4.d
; CHECK-NEXT: uzp1 p1.s, p2.s, p1.s
; CHECK-NEXT: incd z0.d, all, mul #4
; CHECK-NEXT: uzp1 p2.s, p4.s, p5.s
; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p3.s, p3.s, p6.s
; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: cmphi p0.d, p0/z, z2.d, z0.d
; CHECK-NEXT: uzp1 p1.h, p1.h, p3.h
; CHECK-NEXT: cmp x8, #1
; CHECK-NEXT: cset w8, lt
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: uzp1 p0.s, p7.s, p0.s
; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p0.h, p2.h, p0.h
; CHECK-NEXT: uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT: whilelo p1.b, xzr, x8
; CHECK-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 3)
ret <vscale x 16 x i1> %0
}