| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s |
| |
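; Tail-predicated f32 loop where only the first operand of the fminnum is an
; fabs, so no VMINNMA fold is expected: the vabs.f32 stays separate from the
; vminnm.f32.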
| define float @minf32(ptr noalias nocapture readonly %s1, ptr noalias nocapture readonly %s2, ptr noalias nocapture %d, i32 %n) { |
| ; CHECK-LABEL: minf32: |
| ; CHECK: @ %bb.0: @ %entry |
| ; CHECK-NEXT: .save {r7, lr} |
| ; CHECK-NEXT: push {r7, lr} |
| ; CHECK-NEXT: cmp r3, #1 |
| ; CHECK-NEXT: it lt |
| ; CHECK-NEXT: poplt {r7, pc} |
| ; CHECK-NEXT: .LBB0_1: @ %vector.ph |
| ; CHECK-NEXT: dlstp.32 lr, r3 |
| ; CHECK-NEXT: .LBB0_2: @ %vector.body |
| ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: vldrw.u32 q0, [r0], #16 |
| ; CHECK-NEXT: vldrw.u32 q1, [r1], #16 |
| ; CHECK-NEXT: vabs.f32 q0, q0 |
| ; CHECK-NEXT: vminnm.f32 q0, q0, q1 |
| ; CHECK-NEXT: vstrw.32 q0, [r2], #16 |
| ; CHECK-NEXT: letp lr, .LBB0_2 |
| ; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup |
| ; CHECK-NEXT: pop {r7, pc} |
| entry: |
| %cmp8 = icmp sgt i32 %n, 0 |
| br i1 %cmp8, label %vector.ph, label %for.cond.cleanup |
| |
| vector.ph: ; preds = %entry |
| %n.rnd.up = add i32 %n, 3 |
| %n.vec = and i32 %n.rnd.up, -4 |
| br label %vector.body |
| |
| vector.body: ; preds = %vector.body, %vector.ph |
| %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] |
| %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n) |
| %0 = getelementptr inbounds float, ptr %s1, i32 %index |
| %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison) |
| %1 = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> %wide.masked.load) |
| %2 = getelementptr inbounds float, ptr %s2, i32 %index |
| %wide.masked.load10 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison) |
| %3 = call fast <4 x float> @llvm.minnum.v4f32(<4 x float> %1, <4 x float> %wide.masked.load10) |
| %4 = getelementptr inbounds float, ptr %d, i32 %index |
| call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %4, i32 4, <4 x i1> %active.lane.mask) |
| %index.next = add i32 %index, 4 |
| %5 = icmp eq i32 %index.next, %n.vec |
| br i1 %5, label %for.cond.cleanup, label %vector.body |
| |
| for.cond.cleanup: ; preds = %vector.body, %entry |
| ret float undef |
| } |
| |
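; Both operands of the fmaxnum are fabs results, so this should fold to a
; single vmaxnma.f32 inside the tail-predicated loop.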
| define float @maxaf32(ptr noalias nocapture readonly %s1, ptr noalias nocapture readonly %s2, ptr noalias nocapture %d, i32 %n) { |
| ; CHECK-LABEL: maxaf32: |
| ; CHECK: @ %bb.0: @ %entry |
| ; CHECK-NEXT: .save {r7, lr} |
| ; CHECK-NEXT: push {r7, lr} |
| ; CHECK-NEXT: cmp r3, #1 |
| ; CHECK-NEXT: it lt |
| ; CHECK-NEXT: poplt {r7, pc} |
| ; CHECK-NEXT: .LBB1_1: @ %vector.ph |
| ; CHECK-NEXT: dlstp.32 lr, r3 |
| ; CHECK-NEXT: .LBB1_2: @ %vector.body |
| ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: vldrw.u32 q0, [r1], #16 |
| ; CHECK-NEXT: vldrw.u32 q1, [r0], #16 |
| ; CHECK-NEXT: vmaxnma.f32 q1, q0 |
| ; CHECK-NEXT: vstrw.32 q1, [r2], #16 |
| ; CHECK-NEXT: letp lr, .LBB1_2 |
| ; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup |
| ; CHECK-NEXT: pop {r7, pc} |
| entry: |
| %cmp8 = icmp sgt i32 %n, 0 |
| br i1 %cmp8, label %vector.ph, label %for.cond.cleanup |
| |
| vector.ph: ; preds = %entry |
| %n.rnd.up = add i32 %n, 3 |
| %n.vec = and i32 %n.rnd.up, -4 |
| br label %vector.body |
| |
| vector.body: ; preds = %vector.body, %vector.ph |
| %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] |
| %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n) |
| %0 = getelementptr inbounds float, ptr %s1, i32 %index |
| %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison) |
| %1 = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> %wide.masked.load) |
| %2 = getelementptr inbounds float, ptr %s2, i32 %index |
| %wide.masked.load10 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison) |
| %3 = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> %wide.masked.load10) |
| %4 = call fast <4 x float> @llvm.maxnum.v4f32(<4 x float> %1, <4 x float> %3) |
| %5 = getelementptr inbounds float, ptr %d, i32 %index |
| call void @llvm.masked.store.v4f32.p0(<4 x float> %4, ptr %5, i32 4, <4 x i1> %active.lane.mask) |
| %index.next = add i32 %index, 4 |
| %6 = icmp eq i32 %index.next, %n.vec |
| br i1 %6, label %for.cond.cleanup, label %vector.body |
| |
| for.cond.cleanup: ; preds = %vector.body, %entry |
| ret float undef |
| } |
| |
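; f16 variant with fabs on only the first fmaxnum operand; expect a separate
; vabs.f16 feeding the vmaxnm.f16.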
| define half @maxf16(ptr noalias nocapture readonly %s1, ptr noalias nocapture readonly %s2, ptr noalias nocapture %d, i32 %n) { |
| ; CHECK-LABEL: maxf16: |
| ; CHECK: @ %bb.0: @ %entry |
| ; CHECK-NEXT: .save {r7, lr} |
| ; CHECK-NEXT: push {r7, lr} |
| ; CHECK-NEXT: cmp r3, #1 |
| ; CHECK-NEXT: it lt |
| ; CHECK-NEXT: poplt {r7, pc} |
| ; CHECK-NEXT: .LBB2_1: @ %vector.ph |
| ; CHECK-NEXT: dlstp.16 lr, r3 |
| ; CHECK-NEXT: .LBB2_2: @ %vector.body |
| ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: vldrh.u16 q0, [r0], #16 |
| ; CHECK-NEXT: vldrh.u16 q1, [r1], #16 |
| ; CHECK-NEXT: vabs.f16 q0, q0 |
| ; CHECK-NEXT: vmaxnm.f16 q0, q0, q1 |
| ; CHECK-NEXT: vstrh.16 q0, [r2], #16 |
| ; CHECK-NEXT: letp lr, .LBB2_2 |
| ; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup |
| ; CHECK-NEXT: pop {r7, pc} |
| entry: |
| %cmp10 = icmp sgt i32 %n, 0 |
| br i1 %cmp10, label %vector.ph, label %for.cond.cleanup |
| |
| vector.ph: ; preds = %entry |
| %n.rnd.up = add i32 %n, 7 |
| %n.vec = and i32 %n.rnd.up, -8 |
| br label %vector.body |
| |
| vector.body: ; preds = %vector.body, %vector.ph |
| %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] |
| %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %n) |
| %0 = getelementptr inbounds half, ptr %s1, i32 %index |
| %wide.masked.load = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %0, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison) |
| %1 = call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %wide.masked.load) |
| %2 = getelementptr inbounds half, ptr %s2, i32 %index |
| %wide.masked.load12 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %2, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison) |
| %3 = call fast <8 x half> @llvm.maxnum.v8f16(<8 x half> %1, <8 x half> %wide.masked.load12) |
| %4 = getelementptr inbounds half, ptr %d, i32 %index |
| call void @llvm.masked.store.v8f16.p0(<8 x half> %3, ptr %4, i32 2, <8 x i1> %active.lane.mask) |
| %index.next = add i32 %index, 8 |
| %5 = icmp eq i32 %index.next, %n.vec |
| br i1 %5, label %for.cond.cleanup, label %vector.body |
| |
| for.cond.cleanup: ; preds = %vector.body, %entry |
| ret half undef |
| } |
| |
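; f16 variant with fabs on both fminnum operands, which should fold to a
; single vminnma.f16.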
| define half @minaf16(ptr noalias nocapture readonly %s1, ptr noalias nocapture readonly %s2, ptr noalias nocapture %d, i32 %n) { |
| ; CHECK-LABEL: minaf16: |
| ; CHECK: @ %bb.0: @ %entry |
| ; CHECK-NEXT: .save {r7, lr} |
| ; CHECK-NEXT: push {r7, lr} |
| ; CHECK-NEXT: cmp r3, #1 |
| ; CHECK-NEXT: it lt |
| ; CHECK-NEXT: poplt {r7, pc} |
| ; CHECK-NEXT: .LBB3_1: @ %vector.ph |
| ; CHECK-NEXT: dlstp.16 lr, r3 |
| ; CHECK-NEXT: .LBB3_2: @ %vector.body |
| ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: vldrh.u16 q0, [r1], #16 |
| ; CHECK-NEXT: vldrh.u16 q1, [r0], #16 |
| ; CHECK-NEXT: vminnma.f16 q1, q0 |
| ; CHECK-NEXT: vstrh.16 q1, [r2], #16 |
| ; CHECK-NEXT: letp lr, .LBB3_2 |
| ; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup |
| ; CHECK-NEXT: pop {r7, pc} |
| entry: |
| %cmp10 = icmp sgt i32 %n, 0 |
| br i1 %cmp10, label %vector.ph, label %for.cond.cleanup |
| |
| vector.ph: ; preds = %entry |
| %n.rnd.up = add i32 %n, 7 |
| %n.vec = and i32 %n.rnd.up, -8 |
| br label %vector.body |
| |
| vector.body: ; preds = %vector.body, %vector.ph |
| %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] |
| %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %n) |
| %0 = getelementptr inbounds half, ptr %s1, i32 %index |
| %wide.masked.load = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %0, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison) |
| %1 = call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %wide.masked.load) |
| %2 = getelementptr inbounds half, ptr %s2, i32 %index |
| %wide.masked.load12 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %2, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison) |
| %3 = call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %wide.masked.load12) |
| %4 = call fast <8 x half> @llvm.minnum.v8f16(<8 x half> %1, <8 x half> %3) |
| %5 = getelementptr inbounds half, ptr %d, i32 %index |
| call void @llvm.masked.store.v8f16.p0(<8 x half> %4, ptr %5, i32 2, <8 x i1> %active.lane.mask) |
| %index.next = add i32 %index, 8 |
| %6 = icmp eq i32 %index.next, %n.vec |
| br i1 %6, label %for.cond.cleanup, label %vector.body |
| |
| for.cond.cleanup: ; preds = %vector.body, %entry |
| ret half undef |
| } |
| |
| declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32) |
| declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>) |
| declare <4 x float> @llvm.fabs.v4f32(<4 x float>) |
| declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>) |
| declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>) |
| declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>) |
| declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32) |
| declare <8 x half> @llvm.masked.load.v8f16.p0(ptr, i32 immarg, <8 x i1>, <8 x half>) |
| declare <8 x half> @llvm.fabs.v8f16(<8 x half>) |
| declare <8 x half> @llvm.minnum.v8f16(<8 x half>, <8 x half>) |
| declare <8 x half> @llvm.maxnum.v8f16(<8 x half>, <8 x half>) |
| declare void @llvm.masked.store.v8f16.p0(<8 x half>, ptr, i32 immarg, <8 x i1>) |