; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc %s -o - | FileCheck %s --check-prefixes=COMMON,BASE
;; Additional RUN lines to exercise LSR code paths that AArch64 would not
;; normally take, by forcing LSR's preferred addressing mode.
; RUN: llc %s -o - -lsr-preferred-addressing-mode=preindexed | FileCheck %s --check-prefixes=COMMON,PREINDEX
; RUN: llc %s -o - -lsr-preferred-addressing-mode=postindexed | FileCheck %s --check-prefixes=COMMON,POSTINDEX

target triple = "aarch64-unknown-linux-gnu"

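;; Four contiguous SVE vectors are loaded from `mul vl` offsets off a common
;; base, so the loads should use immediate addressing, and LSR can combine
;; the two increments:
;;   pointer step: %mul = vscale << 6 = 64 * vscale bytes = 4 VLs
;;   index step:   %0   = vscale << 4 = 16 * vscale bytes = 1 VL
;; The address %src.addr + %idx therefore advances by 5 VLs per iteration,
;; matching the single `addvl x0, x0, #5` in the loop.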
define void @mulvl123_addressing(ptr %src, ptr %dst, i64 %count) #0 {
; COMMON-LABEL: mulvl123_addressing:
; COMMON:       // %bb.0: // %entry
; COMMON-NEXT:    ptrue p0.b
; COMMON-NEXT:    mov x8, xzr
; COMMON-NEXT:  .LBB0_1: // %for.body
; COMMON-NEXT:    // =>This Inner Loop Header: Depth=1
; COMMON-NEXT:    ldr z0, [x0]
; COMMON-NEXT:    ldr z1, [x0, #1, mul vl]
; COMMON-NEXT:    ldr z2, [x0, #2, mul vl]
; COMMON-NEXT:    ldr z3, [x0, #3, mul vl]
; COMMON-NEXT:    addvl x0, x0, #5
; COMMON-NEXT:    umax z0.b, p0/m, z0.b, z1.b
; COMMON-NEXT:    movprfx z1, z2
; COMMON-NEXT:    umax z1.b, p0/m, z1.b, z3.b
; COMMON-NEXT:    umax z0.b, p0/m, z0.b, z1.b
; COMMON-NEXT:    st1b { z0.b }, p0, [x1, x8]
; COMMON-NEXT:    incb x8
; COMMON-NEXT:    cmp x8, x2
; COMMON-NEXT:    b.lo .LBB0_1
; COMMON-NEXT:  // %bb.2: // %for.exit
; COMMON-NEXT:    ret
entry:
  %vscale = tail call i64 @llvm.vscale.i64()
  %0 = shl nuw nsw i64 %vscale, 4
  %mul = shl nuw nsw i64 %vscale, 6
  br label %for.body

for.body:
  %src.addr = phi ptr [ %src, %entry ], [ %src.addr.next, %for.body ]
  %idx = phi i64 [ 0, %entry ], [ %idx.next, %for.body ]
  %arrayidx = getelementptr inbounds i8, ptr %src.addr, i64 %idx
  %1 = load <vscale x 16 x i8>, ptr %arrayidx
  %2 = getelementptr <vscale x 16 x i8>, ptr %arrayidx, i64 1
  %3 = load <vscale x 16 x i8>, ptr %2
  %4 = getelementptr <vscale x 16 x i8>, ptr %arrayidx, i64 2
  %5 = load <vscale x 16 x i8>, ptr %4
  %6 = getelementptr <vscale x 16 x i8>, ptr %arrayidx, i64 3
  %7 = load <vscale x 16 x i8>, ptr %6
  %8 = tail call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> %1, <vscale x 16 x i8> %3)
  %9 = tail call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> %5, <vscale x 16 x i8> %7)
  %10 = tail call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> %8, <vscale x 16 x i8> %9)
  %src.addr.next = getelementptr inbounds i8, ptr %src.addr, i64 %mul
  %arrayidx4 = getelementptr inbounds i8, ptr %dst, i64 %idx
  store <vscale x 16 x i8> %10, ptr %arrayidx4
  %idx.next = add i64 %idx, %0
  %cmp = icmp ult i64 %idx.next, %count
  br i1 %cmp, label %for.body, label %for.exit

for.exit:
  ret void
}

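;; Two source rows %stride bytes apart are read per iteration, with a second
;; vector one VL beyond each row base, so those accesses should fold into
;; `#1, mul vl` immediates rather than needing extra registers. Both the
;; source and destination pointers then step by %mul = vscale << 5 =
;; 32 * vscale bytes (2 VLs), matching `incb x0/x1, all, mul #2`.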
define void @many_mulvl1_addressing(ptr %src_rows, ptr %dst_rows, i64 %stride, i64 %count) #0 {
; COMMON-LABEL: many_mulvl1_addressing:
; COMMON:       // %bb.0: // %entry
; COMMON-NEXT:    ptrue p0.b
; COMMON-NEXT:    ptrue p1.h
; COMMON-NEXT:  .LBB1_1: // %for.body
; COMMON-NEXT:    // =>This Inner Loop Header: Depth=1
; COMMON-NEXT:    add x8, x0, x2
; COMMON-NEXT:    ldr z0, [x0]
; COMMON-NEXT:    ld1b { z1.b }, p0/z, [x0, x2]
; COMMON-NEXT:    ldr z2, [x0, #1, mul vl]
; COMMON-NEXT:    ldr z3, [x8, #1, mul vl]
; COMMON-NEXT:    incb x0, all, mul #2
; COMMON-NEXT:    subs x3, x3, #1
; COMMON-NEXT:    add z0.b, z0.b, z1.b
; COMMON-NEXT:    add z1.b, z2.b, z3.b
; COMMON-NEXT:    st1b { z0.h }, p1, [x1]
; COMMON-NEXT:    st1b { z1.h }, p1, [x1, #1, mul vl]
; COMMON-NEXT:    incb x1, all, mul #2
; COMMON-NEXT:    b.ne .LBB1_1
; COMMON-NEXT:  // %bb.2: // %for.exit
; COMMON-NEXT:    ret
entry:
  %vscale = tail call i64 @llvm.vscale.i64()
  %mul = shl nuw nsw i64 %vscale, 5
  br label %for.body

for.body:
  %src_row_addr = phi ptr [ %src_rows, %entry ], [ %add_ptr_src, %for.body ]
  %dst_row_addr = phi ptr [ %dst_rows, %entry ], [ %add_ptr_dst, %for.body ]
  %idx = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %0 = load <vscale x 16 x i8>, ptr %src_row_addr
  %1 = getelementptr <vscale x 16 x i8>, ptr %src_row_addr, i64 1
  %2 = load <vscale x 16 x i8>, ptr %1
  %arrayidx2 = getelementptr inbounds i8, ptr %src_row_addr, i64 %stride
  %3 = load <vscale x 16 x i8>, ptr %arrayidx2
  %4 = getelementptr <vscale x 16 x i8>, ptr %arrayidx2, i64 1
  %5 = load <vscale x 16 x i8>, ptr %4
  %6 = add <vscale x 16 x i8> %0, %3
  %7 = add <vscale x 16 x i8> %2, %5
  %8 = bitcast <vscale x 16 x i8> %6 to <vscale x 8 x i16>
  %9 = trunc <vscale x 8 x i16> %8 to <vscale x 8 x i8>
  store <vscale x 8 x i8> %9, ptr %dst_row_addr
  %10 = bitcast <vscale x 16 x i8> %7 to <vscale x 8 x i16>
  %11 = getelementptr <vscale x 8 x i8>, ptr %dst_row_addr, i64 1
  %12 = trunc <vscale x 8 x i16> %10 to <vscale x 8 x i8>
  store <vscale x 8 x i8> %12, ptr %11
  %add_ptr_src = getelementptr inbounds i8, ptr %src_row_addr, i64 %mul
  %add_ptr_dst = getelementptr inbounds i8, ptr %dst_row_addr, i64 %mul
  %inc = add nuw i64 %idx, 1
  %exitcond = icmp eq i64 %inc, %count
  br i1 %exitcond, label %for.exit, label %for.body

for.exit:
  ret void
}

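;; A fixed-step IV (4 x i32 = 16 bytes per iteration) combined with a
;; scalable offset of %mul = vscale << 4 i32 elements = 64 * vscale bytes
;; (4 VLs). The scalable part should fold into the second load as
;; `#4, mul vl`, leaving plain `add x0/x1, ..., #16` pointer updates.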
define void @fixed_iv_scalable_offset(ptr %src, ptr %dst, i64 %count) #0 {
; COMMON-LABEL: fixed_iv_scalable_offset:
; COMMON:       // %bb.0: // %entry
; COMMON-NEXT:  .LBB2_1: // %for.body
; COMMON-NEXT:    // =>This Inner Loop Header: Depth=1
; COMMON-NEXT:    ldr z0, [x0]
; COMMON-NEXT:    ldr z1, [x0, #4, mul vl]
; COMMON-NEXT:    subs x2, x2, #4
; COMMON-NEXT:    add x0, x0, #16
; COMMON-NEXT:    add z0.s, z0.s, z1.s
; COMMON-NEXT:    str z0, [x1]
; COMMON-NEXT:    add x1, x1, #16
; COMMON-NEXT:    b.ne .LBB2_1
; COMMON-NEXT:  // %bb.2: // %for.exit
; COMMON-NEXT:    ret
entry:
  %vscale = tail call i64 @llvm.vscale.i64()
  %mul = shl nuw nsw i64 %vscale, 4
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %src.ptr = getelementptr inbounds i32, ptr %src, i64 %iv
  %data = load <vscale x 4 x i32>, ptr %src.ptr
  %src.ptr.offset = getelementptr inbounds i32, ptr %src.ptr, i64 %mul
  %data2 = load <vscale x 4 x i32>, ptr %src.ptr.offset
  %add = add <vscale x 4 x i32> %data, %data2
  %dst.ptr = getelementptr i32, ptr %dst, i64 %iv
  store <vscale x 4 x i32> %add, ptr %dst.ptr
  %inc = add nuw i64 %iv, 4
  %exit.cond = icmp eq i64 %inc, %count
  br i1 %exit.cond, label %for.exit, label %for.body

for.exit:
  ret void
}

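;; Mixed offsets, scalable first: the second access is %mul = vscale << 4
;; i32 elements (4 VLs) beyond the first, and the third a further 8 i32s
;; (32 bytes) beyond that. Rebasing the pointer on the middle access (the
;; initial `incb x0, all, mul #4`) lets the first load use `#-4, mul vl`,
;; leaving only the fixed 32-byte part in a register (x8/x9 = 8, scaled by
;; `lsl #2`).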
define void @mixed_offsets_scalable_then_fixed(ptr %src, ptr %dst, i64 %count) #0 {
; BASE-LABEL: mixed_offsets_scalable_then_fixed:
; BASE:       // %bb.0: // %entry
; BASE-NEXT:    incb x0, all, mul #4
; BASE-NEXT:    ptrue p0.s
; BASE-NEXT:    mov x8, #8 // =0x8
; BASE-NEXT:  .LBB3_1: // %for.body
; BASE-NEXT:    // =>This Inner Loop Header: Depth=1
; BASE-NEXT:    ldr z0, [x0, #-4, mul vl]
; BASE-NEXT:    ldr z1, [x0]
; BASE-NEXT:    decw x2
; BASE-NEXT:    ld1w { z2.s }, p0/z, [x0, x8, lsl #2]
; BASE-NEXT:    incb x0
; BASE-NEXT:    add z0.s, z0.s, z1.s
; BASE-NEXT:    add z0.s, z0.s, z2.s
; BASE-NEXT:    str z0, [x1]
; BASE-NEXT:    incb x1
; BASE-NEXT:    cbnz x2, .LBB3_1
; BASE-NEXT:  // %bb.2: // %for.exit
; BASE-NEXT:    ret
;
; PREINDEX-LABEL: mixed_offsets_scalable_then_fixed:
; PREINDEX:       // %bb.0: // %entry
; PREINDEX-NEXT:    incb x0, all, mul #4
; PREINDEX-NEXT:    ptrue p0.s
; PREINDEX-NEXT:    mov x8, #8 // =0x8
; PREINDEX-NEXT:  .LBB3_1: // %for.body
; PREINDEX-NEXT:    // =>This Inner Loop Header: Depth=1
; PREINDEX-NEXT:    ldr z0, [x0, #-4, mul vl]
; PREINDEX-NEXT:    ldr z1, [x0]
; PREINDEX-NEXT:    decw x2
; PREINDEX-NEXT:    ld1w { z2.s }, p0/z, [x0, x8, lsl #2]
; PREINDEX-NEXT:    incb x0
; PREINDEX-NEXT:    add z0.s, z0.s, z1.s
; PREINDEX-NEXT:    add z0.s, z0.s, z2.s
; PREINDEX-NEXT:    str z0, [x1]
; PREINDEX-NEXT:    incb x1
; PREINDEX-NEXT:    cbnz x2, .LBB3_1
; PREINDEX-NEXT:  // %bb.2: // %for.exit
; PREINDEX-NEXT:    ret
;
; POSTINDEX-LABEL: mixed_offsets_scalable_then_fixed:
; POSTINDEX:       // %bb.0: // %entry
; POSTINDEX-NEXT:    incb x0, all, mul #4
; POSTINDEX-NEXT:    ptrue p0.s
; POSTINDEX-NEXT:    mov x8, xzr
; POSTINDEX-NEXT:    mov x9, #8 // =0x8
; POSTINDEX-NEXT:  .LBB3_1: // %for.body
; POSTINDEX-NEXT:    // =>This Inner Loop Header: Depth=1
; POSTINDEX-NEXT:    ldr z0, [x0, #-4, mul vl]
; POSTINDEX-NEXT:    ldr z1, [x0]
; POSTINDEX-NEXT:    ld1w { z2.s }, p0/z, [x0, x9, lsl #2]
; POSTINDEX-NEXT:    incb x0
; POSTINDEX-NEXT:    add z0.s, z0.s, z1.s
; POSTINDEX-NEXT:    add z0.s, z0.s, z2.s
; POSTINDEX-NEXT:    st1w { z0.s }, p0, [x1, x8, lsl #2]
; POSTINDEX-NEXT:    incw x8
; POSTINDEX-NEXT:    cmp x2, x8
; POSTINDEX-NEXT:    b.ne .LBB3_1
; POSTINDEX-NEXT:  // %bb.2: // %for.exit
; POSTINDEX-NEXT:    ret
entry:
  %vscale = tail call i64 @llvm.vscale.i64()
  %mul = shl nuw nsw i64 %vscale, 4
  %vl = shl nuw nsw i64 %vscale, 2
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %src.ptr = getelementptr inbounds i32, ptr %src, i64 %iv
  %data = load <vscale x 4 x i32>, ptr %src.ptr
  %src.ptr.sc_off = getelementptr inbounds i32, ptr %src.ptr, i64 %mul
  %data2 = load <vscale x 4 x i32>, ptr %src.ptr.sc_off
  %src.ptr.fx_off = getelementptr inbounds i32, ptr %src.ptr.sc_off, i64 8
  %data3 = load <vscale x 4 x i32>, ptr %src.ptr.fx_off
  %add = add <vscale x 4 x i32> %data, %data2
  %add2 = add <vscale x 4 x i32> %add, %data3
  %dst.ptr = getelementptr i32, ptr %dst, i64 %iv
  store <vscale x 4 x i32> %add2, ptr %dst.ptr
  %inc = add nuw i64 %iv, %vl
  %exit.cond = icmp eq i64 %inc, %count
  br i1 %exit.cond, label %for.exit, label %for.body

for.exit:
  ret void
}

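;; As above, but with the fixed offset applied before the scalable one. The
;; third access sits at 32 + 64 * vscale bytes from the base, which no
;; single SVE addressing mode can fold, so a second base register
;; (x9 = x0 + 4 VLs + 32) is set up outside the loop and every access uses
;; a register-indexed form.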
define void @mixed_offsets_fixed_then_scalable(ptr %src, ptr %dst, i64 %count) #0 {
; COMMON-LABEL: mixed_offsets_fixed_then_scalable:
; COMMON:       // %bb.0: // %entry
; COMMON-NEXT:    mov x9, x0
; COMMON-NEXT:    ptrue p0.s
; COMMON-NEXT:    mov x8, xzr
; COMMON-NEXT:    incb x9, all, mul #4
; COMMON-NEXT:    mov x10, #8 // =0x8
; COMMON-NEXT:    add x9, x9, #32
; COMMON-NEXT:  .LBB4_1: // %for.body
; COMMON-NEXT:    // =>This Inner Loop Header: Depth=1
; COMMON-NEXT:    add x11, x0, x8, lsl #2
; COMMON-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
; COMMON-NEXT:    ld1w { z2.s }, p0/z, [x9, x8, lsl #2]
; COMMON-NEXT:    ld1w { z1.s }, p0/z, [x11, x10, lsl #2]
; COMMON-NEXT:    add z0.s, z0.s, z1.s
; COMMON-NEXT:    add z0.s, z0.s, z2.s
; COMMON-NEXT:    st1w { z0.s }, p0, [x1, x8, lsl #2]
; COMMON-NEXT:    incw x8
; COMMON-NEXT:    cmp x2, x8
; COMMON-NEXT:    b.ne .LBB4_1
; COMMON-NEXT:  // %bb.2: // %for.exit
; COMMON-NEXT:    ret
entry:
  %vscale = tail call i64 @llvm.vscale.i64()
  %mul = shl nuw nsw i64 %vscale, 4
  %vl = shl nuw nsw i64 %vscale, 2
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %src.ptr = getelementptr inbounds i32, ptr %src, i64 %iv
  %data = load <vscale x 4 x i32>, ptr %src.ptr
  %src.ptr.fx_off = getelementptr inbounds i32, ptr %src.ptr, i64 8
  %data2 = load <vscale x 4 x i32>, ptr %src.ptr.fx_off
  %src.ptr.sc_off = getelementptr inbounds i32, ptr %src.ptr.fx_off, i64 %mul
  %data3 = load <vscale x 4 x i32>, ptr %src.ptr.sc_off
  %add = add <vscale x 4 x i32> %data, %data2
  %add2 = add <vscale x 4 x i32> %add, %data3
  %dst.ptr = getelementptr i32, ptr %dst, i64 %iv
  store <vscale x 4 x i32> %add2, ptr %dst.ptr
  %inc = add nuw i64 %iv, %vl
  %exit.cond = icmp eq i64 %inc, %count
  br i1 %exit.cond, label %for.exit, label %for.body

for.exit:
  ret void
}

;; FIXME: There's an opportunity here (that we currently miss) to define the
;; phi on the middle access and use negative and positive scalable immediates.
;;
;; Currently we generate a scalable immediate offset for the load that is in
;; range of the base, and keep the offset in a register for the access that
;; is out of range of the base (but in range of the other).
;;
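;; Both scalable steps below (%mul and %mul2) are 16 * vscale i32 elements =
;; 4 VLs, so the three loads sit at 0, 4 and 8 VLs from the base; a phi
;; defined on the middle access would need only `#-4, mul vl` and
;; `#4, mul vl`.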
define void @three_access_wide_gap(ptr %src, ptr %dst, i64 %count) #0 {
; BASE-LABEL: three_access_wide_gap:
; BASE:       // %bb.0: // %entry
; BASE-NEXT:  .LBB5_1: // %for.body
; BASE-NEXT:    // =>This Inner Loop Header: Depth=1
; BASE-NEXT:    ldr z0, [x0]
; BASE-NEXT:    ldr z1, [x0, #4, mul vl]
; BASE-NEXT:    decw x2
; BASE-NEXT:    ldr z2, [x0, #8, mul vl]
; BASE-NEXT:    incb x0
; BASE-NEXT:    add z0.s, z0.s, z1.s
; BASE-NEXT:    add z0.s, z0.s, z2.s
; BASE-NEXT:    str z0, [x1]
; BASE-NEXT:    incb x1
; BASE-NEXT:    cbnz x2, .LBB5_1
; BASE-NEXT:  // %bb.2: // %for.exit
; BASE-NEXT:    ret
;
; PREINDEX-LABEL: three_access_wide_gap:
; PREINDEX:       // %bb.0: // %entry
; PREINDEX-NEXT:  .LBB5_1: // %for.body
; PREINDEX-NEXT:    // =>This Inner Loop Header: Depth=1
; PREINDEX-NEXT:    ldr z0, [x0]
; PREINDEX-NEXT:    ldr z1, [x0, #4, mul vl]
; PREINDEX-NEXT:    decw x2
; PREINDEX-NEXT:    ldr z2, [x0, #8, mul vl]
; PREINDEX-NEXT:    incb x0
; PREINDEX-NEXT:    add z0.s, z0.s, z1.s
; PREINDEX-NEXT:    add z0.s, z0.s, z2.s
; PREINDEX-NEXT:    str z0, [x1]
; PREINDEX-NEXT:    incb x1
; PREINDEX-NEXT:    cbnz x2, .LBB5_1
; PREINDEX-NEXT:  // %bb.2: // %for.exit
; PREINDEX-NEXT:    ret
;
; POSTINDEX-LABEL: three_access_wide_gap:
; POSTINDEX:       // %bb.0: // %entry
; POSTINDEX-NEXT:    ptrue p0.s
; POSTINDEX-NEXT:    mov x8, xzr
; POSTINDEX-NEXT:  .LBB5_1: // %for.body
; POSTINDEX-NEXT:    // =>This Inner Loop Header: Depth=1
; POSTINDEX-NEXT:    ldr z0, [x0]
; POSTINDEX-NEXT:    ldr z1, [x0, #4, mul vl]
; POSTINDEX-NEXT:    ldr z2, [x0, #8, mul vl]
; POSTINDEX-NEXT:    incb x0
; POSTINDEX-NEXT:    add z0.s, z0.s, z1.s
; POSTINDEX-NEXT:    add z0.s, z0.s, z2.s
; POSTINDEX-NEXT:    st1w { z0.s }, p0, [x1, x8, lsl #2]
; POSTINDEX-NEXT:    incw x8
; POSTINDEX-NEXT:    cmp x2, x8
; POSTINDEX-NEXT:    b.ne .LBB5_1
; POSTINDEX-NEXT:  // %bb.2: // %for.exit
; POSTINDEX-NEXT:    ret
entry:
  %vscale = tail call i64 @llvm.vscale.i64()
  %mul = mul nuw nsw i64 %vscale, 16
  %mul2 = mul nuw nsw i64 %vscale, 16
  %vl = mul nuw nsw i64 %vscale, 4
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %src.ptr = getelementptr inbounds i32, ptr %src, i64 %iv
  %data = load <vscale x 4 x i32>, ptr %src.ptr
  %src.ptr.sc_off = getelementptr inbounds i32, ptr %src.ptr, i64 %mul
  %data2 = load <vscale x 4 x i32>, ptr %src.ptr.sc_off
  %src.ptr.sc_off2 = getelementptr inbounds i32, ptr %src.ptr.sc_off, i64 %mul2
  %data3 = load <vscale x 4 x i32>, ptr %src.ptr.sc_off2
  %add = add <vscale x 4 x i32> %data, %data2
  %add2 = add <vscale x 4 x i32> %add, %data3
  %dst.ptr = getelementptr i32, ptr %dst, i64 %iv
  store <vscale x 4 x i32> %add2, ptr %dst.ptr
  %inc = add nuw i64 %iv, %vl
  %exit.cond = icmp eq i64 %inc, %count
  br i1 %exit.cond, label %for.exit, label %for.body

for.exit:
  ret void
}

;; Here are two writes that should be `16 * vscale * vscale` float elements
;; apart, so MUL VL addressing cannot be used to offset the second write:
;; `#4, mul vl`, for example, would only be an offset of `16 * vscale`
;; elements (dropping a factor of vscale).
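;; Concretely, %upper_offset = %i * (4 * vscale) and %lower_offset =
;; (%i + 4 * vscale) * (4 * vscale) float elements, a gap of
;; 16 * vscale * vscale floats = 64 * vscale * vscale bytes, so the code
;; below materialises that product in a register (`umull` of vscale and
;; rdvl #4) instead.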
define void @vscale_squared_offset(ptr %alloc) #0 {
; COMMON-LABEL: vscale_squared_offset:
; COMMON:       // %bb.0: // %entry
; COMMON-NEXT:    rdvl x9, #1
; COMMON-NEXT:    rdvl x10, #4
; COMMON-NEXT:    fmov z0.s, #4.00000000
; COMMON-NEXT:    lsr x9, x9, #4
; COMMON-NEXT:    fmov z1.s, #8.00000000
; COMMON-NEXT:    mov x8, xzr
; COMMON-NEXT:    ptrue p0.s, vl1
; COMMON-NEXT:    umull x9, w9, w10
; COMMON-NEXT:    cntw x10
; COMMON-NEXT:    cmp x8, x10
; COMMON-NEXT:    b.ge .LBB6_2
; COMMON-NEXT:  .LBB6_1: // %for.body
; COMMON-NEXT:    // =>This Inner Loop Header: Depth=1
; COMMON-NEXT:    add x11, x0, x9
; COMMON-NEXT:    st1w { z0.s }, p0, [x0]
; COMMON-NEXT:    incb x0
; COMMON-NEXT:    st1w { z1.s }, p0, [x11]
; COMMON-NEXT:    add x8, x8, #1
; COMMON-NEXT:    cmp x8, x10
; COMMON-NEXT:    b.lt .LBB6_1
; COMMON-NEXT:  .LBB6_2: // %for.exit
; COMMON-NEXT:    ret
entry:
  %vscale = call i64 @llvm.vscale.i64()
  %c4_vscale = mul i64 %vscale, 4
  br label %for.check
for.check:
  %i = phi i64 [ %next_i, %for.body ], [ 0, %entry ]
  %is_lt = icmp slt i64 %i, %c4_vscale
  br i1 %is_lt, label %for.body, label %for.exit
for.body:
  %mask = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64 0, i64 1)
  %upper_offset = mul i64 %i, %c4_vscale
  %upper_ptr = getelementptr float, ptr %alloc, i64 %upper_offset
  call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 4.000000e+00, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), ptr %upper_ptr, i32 4, <vscale x 4 x i1> %mask)
  %lower_i = add i64 %i, %c4_vscale
  %lower_offset = mul i64 %lower_i, %c4_vscale
  %lower_ptr = getelementptr float, ptr %alloc, i64 %lower_offset
  call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 8.000000e+00, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), ptr %lower_ptr, i32 4, <vscale x 4 x i1> %mask)
  %next_i = add i64 %i, 1
  br label %for.check
for.exit:
  ret void
}

declare i64 @llvm.vscale.i64()
declare <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64, i64)
declare void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float>, ptr, i32, <vscale x 4 x i1>)

attributes #0 = { "target-features"="+sve2" vscale_range(1,16) }