| ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6 |
| ; REQUIRES: asserts |
| |
| ; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -prefer-inloop-reductions -disable-output %s 2>&1 | FileCheck %s |
| |
| ; Tests for printing VPlans with reductions. |
| |
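| ; Test printing a VPlan for a simple in-loop fadd reduction of floats loaded from %y. |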
| define float @print_reduction(i64 %n, ptr noalias %y) { |
| ; CHECK-LABEL: 'print_reduction' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: Live-in ir<%n> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = reduction-start-vector fast ir<0.000000e+00>, ir<0.000000e+00>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi vp<[[VP3]]>, ir<%red.next> |
| ; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = SCALAR-STEPS vp<[[VP4]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%y>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx> |
| ; CHECK-NEXT: WIDEN ir<%lv> = load vp<[[VP6]]> |
| ; CHECK-NEXT: REDUCE ir<%red.next> = ir<%red> + fast reduce.fadd (ir<%lv>) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP8:%[0-9]+]]> = compute-reduction-result (fadd, in-loop) fast ir<%red.next> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%n>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %red.next.lcssa = phi float [ %red.next, %loop ] (extra operand: vp<[[VP8]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP2]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP8]]>, middle.block ], [ ir<0.000000e+00>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %red = phi float [ %red.next, %loop ], [ 0.000000e+00, %entry ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %arrayidx = getelementptr inbounds float, ptr %y, i64 %iv |
| ; CHECK-NEXT: IR %lv = load float, ptr %arrayidx, align 4 |
| ; CHECK-NEXT: IR %red.next = fadd fast float %lv, %red |
| ; CHECK-NEXT: IR %iv.next = add i64 %iv, 1 |
| ; CHECK-NEXT: IR %exitcond = icmp eq i64 %iv.next, %n |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: ; preds = %entry, %loop |
| %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] |
| %red = phi float [ %red.next, %loop ], [ 0.0, %entry ] |
| %arrayidx = getelementptr inbounds float, ptr %y, i64 %iv |
| %lv = load float, ptr %arrayidx, align 4 |
| %red.next = fadd fast float %lv, %red |
| %iv.next = add i64 %iv, 1 |
| %exitcond = icmp eq i64 %iv.next, %n |
| br i1 %exitcond, label %exit, label %loop |
| |
| exit: ; preds = %loop, %entry |
| ret float %red.next |
| } |
| |
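| ; As above, but the reduced value is also stored to the invariant address %dst; in the VPlan the store is emitted in the middle block. |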
| define void @print_reduction_with_invariant_store(i64 %n, ptr noalias %y, ptr noalias %dst) { |
| ; CHECK-LABEL: 'print_reduction_with_invariant_store' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: Live-in ir<%n> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = reduction-start-vector fast ir<0.000000e+00>, ir<0.000000e+00>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi vp<[[VP3]]>, ir<%red.next> |
| ; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = SCALAR-STEPS vp<[[VP4]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%y>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx> |
| ; CHECK-NEXT: WIDEN ir<%lv> = load vp<[[VP6]]> |
| ; CHECK-NEXT: REDUCE ir<%red.next> = ir<%red> + fast reduce.fadd (ir<%lv>) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP8:%[0-9]+]]> = compute-reduction-result (fadd, in-loop) fast ir<%red.next> |
| ; CHECK-NEXT: CLONE store vp<[[VP8]]>, ir<%dst> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%n>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP2]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP8]]>, middle.block ], [ ir<0.000000e+00>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %red = phi float [ %red.next, %loop ], [ 0.000000e+00, %entry ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %arrayidx = getelementptr inbounds float, ptr %y, i64 %iv |
| ; CHECK-NEXT: IR %lv = load float, ptr %arrayidx, align 4 |
| ; CHECK-NEXT: IR %red.next = fadd fast float %lv, %red |
| ; CHECK-NEXT: IR store float %red.next, ptr %dst, align 4 |
| ; CHECK-NEXT: IR %iv.next = add i64 %iv, 1 |
| ; CHECK-NEXT: IR %exitcond = icmp eq i64 %iv.next, %n |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: ; preds = %entry, %loop |
| %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] |
| %red = phi float [ %red.next, %loop ], [ 0.0, %entry ] |
| %arrayidx = getelementptr inbounds float, ptr %y, i64 %iv |
| %lv = load float, ptr %arrayidx, align 4 |
| %red.next = fadd fast float %lv, %red |
| store float %red.next, ptr %dst, align 4 |
| %iv.next = add i64 %iv, 1 |
| %exitcond = icmp eq i64 %iv.next, %n |
| br i1 %exitcond, label %exit, label %loop |
| |
| exit: ; preds = %loop, %entry |
| ret void |
| } |
| |
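| ; Test printing a reduction formed from a call to llvm.fmuladd; it is decomposed into an fmul feeding an in-loop fadd reduction. |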
| define float @print_fmuladd_strict(ptr %a, ptr %b, i64 %n) { |
| ; CHECK-LABEL: 'print_fmuladd_strict' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: Live-in ir<%n> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = reduction-start-vector nnan ninf nsz ir<0.000000e+00>, ir<0.000000e+00>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%sum.07> = phi vp<[[VP3]]>, ir<%muladd> |
| ; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = SCALAR-STEPS vp<[[VP4]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%a>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx> |
| ; CHECK-NEXT: WIDEN ir<%l.a> = load vp<[[VP6]]> |
| ; CHECK-NEXT: CLONE ir<%arrayidx2> = getelementptr inbounds ir<%b>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx2> |
| ; CHECK-NEXT: WIDEN ir<%l.b> = load vp<[[VP7]]> |
| ; CHECK-NEXT: EMIT vp<[[VP8:%[0-9]+]]> = fmul nnan ninf nsz ir<%l.a>, ir<%l.b> |
| ; CHECK-NEXT: REDUCE ir<%muladd> = ir<%sum.07> + nnan ninf nsz reduce.fadd (vp<[[VP8]]>) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP10:%[0-9]+]]> = compute-reduction-result (fadd, in-loop) nnan ninf nsz ir<%muladd> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%n>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %muladd.lcssa = phi float [ %muladd, %loop ] (extra operand: vp<[[VP10]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP2]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP10]]>, middle.block ], [ ir<0.000000e+00>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %sum.07 = phi float [ 0.000000e+00, %entry ], [ %muladd, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv |
| ; CHECK-NEXT: IR %l.a = load float, ptr %arrayidx, align 4 |
| ; CHECK-NEXT: IR %arrayidx2 = getelementptr inbounds float, ptr %b, i64 %iv |
| ; CHECK-NEXT: IR %l.b = load float, ptr %arrayidx2, align 4 |
| ; CHECK-NEXT: IR %muladd = tail call nnan ninf nsz float @llvm.fmuladd.f32(float %l.a, float %l.b, float %sum.07) |
| ; CHECK-NEXT: IR %iv.next = add nuw nsw i64 %iv, 1 |
| ; CHECK-NEXT: IR %exitcond.not = icmp eq i64 %iv.next, %n |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] |
| %sum.07 = phi float [ 0.000000e+00, %entry ], [ %muladd, %loop ] |
| %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv |
| %l.a = load float, ptr %arrayidx, align 4 |
| %arrayidx2 = getelementptr inbounds float, ptr %b, i64 %iv |
| %l.b = load float, ptr %arrayidx2, align 4 |
| %muladd = tail call nnan ninf nsz float @llvm.fmuladd.f32(float %l.a, float %l.b, float %sum.07) |
| %iv.next = add nuw nsw i64 %iv, 1 |
| %exitcond.not = icmp eq i64 %iv.next, %n |
| br i1 %exitcond.not, label %exit, label %loop |
| |
| exit: |
| ret float %muladd |
| } |
| |
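| ; Test printing a FindLastIV reduction: the conditionally selected induction value is reduced with smax using a sentinel start value. |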
| define i64 @find_last_iv(ptr %a, i64 %n, i64 %start) { |
| ; CHECK-LABEL: 'find_last_iv' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: Live-in ir<%n> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nuw nsw ir<0>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%rdx> = phi ir<-9223372036854775808>, ir<%cond> |
| ; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = SCALAR-STEPS vp<[[VP3]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr inbounds ir<%a>, vp<[[VP4]]> |
| ; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%gep.a> |
| ; CHECK-NEXT: WIDEN ir<%l.a> = load vp<[[VP5]]> |
| ; CHECK-NEXT: WIDEN ir<%cmp2> = icmp eq ir<%l.a>, ir<%start> |
| ; CHECK-NEXT: WIDEN ir<%cond> = select ir<%cmp2>, ir<%iv>, ir<%rdx> |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = compute-reduction-result (smax) ir<%cond> |
| ; CHECK-NEXT: EMIT vp<[[VP8:%[0-9]+]]> = icmp ne vp<[[VP7]]>, ir<-9223372036854775808> |
| ; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = select vp<[[VP8]]>, vp<[[VP7]]>, ir<%start> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%n>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %cond.lcssa = phi i64 [ %cond, %loop ] (extra operand: vp<[[VP9]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP2]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP9]]>, middle.block ], [ ir<%start>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %inc, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %rdx = phi i64 [ %start, %entry ], [ %cond, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %gep.a = getelementptr inbounds i64, ptr %a, i64 %iv |
| ; CHECK-NEXT: IR %l.a = load i64, ptr %gep.a, align 8 |
| ; CHECK-NEXT: IR %cmp2 = icmp eq i64 %l.a, %start |
| ; CHECK-NEXT: IR %cond = select i1 %cmp2, i64 %iv, i64 %rdx |
| ; CHECK-NEXT: IR %inc = add nuw nsw i64 %iv, 1 |
| ; CHECK-NEXT: IR %exitcond.not = icmp eq i64 %inc, %n |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %iv = phi i64 [ 0, %entry ], [ %inc, %loop ] |
| %rdx = phi i64 [ %start, %entry ], [ %cond, %loop ] |
| %gep.a = getelementptr inbounds i64, ptr %a, i64 %iv |
| %l.a = load i64, ptr %gep.a, align 8 |
| %cmp2 = icmp eq i64 %l.a, %start |
| %cond = select i1 %cmp2, i64 %iv, i64 %rdx |
| %inc = add nuw nsw i64 %iv, 1 |
| %exitcond.not = icmp eq i64 %inc, %n |
| br i1 %exitcond.not, label %exit, label %loop |
| |
| exit: |
| ret i64 %cond |
| } |
| |
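| ; Test printing an extended add reduction; the zext of the loaded value is folded into a single VPExpression. |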
| define i64 @print_extended_reduction(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) { |
| ; CHECK-LABEL: 'print_extended_reduction' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: Live-in ir<%n> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%rdx> = phi vp<[[VP3]]>, vp<[[VP7:%[0-9]+]]> |
| ; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = SCALAR-STEPS vp<[[VP4]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%x>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx> |
| ; CHECK-NEXT: WIDEN ir<%load0> = load vp<[[VP6]]> |
| ; CHECK-NEXT: EXPRESSION vp<[[VP7]]> = ir<%rdx> + reduce.add (ir<%load0> zext to i64) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = compute-reduction-result (add, in-loop) vp<[[VP7]]> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%n>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %r.0.lcssa = phi i64 [ %rdx.next, %loop ] (extra operand: vp<[[VP9]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP2]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %arrayidx = getelementptr inbounds i32, ptr %x, i32 %iv |
| ; CHECK-NEXT: IR %load0 = load i32, ptr %arrayidx, align 4 |
| ; CHECK-NEXT: IR %conv0 = zext i32 %load0 to i64 |
| ; CHECK-NEXT: IR %rdx.next = add nsw i64 %rdx, %conv0 |
| ; CHECK-NEXT: IR %iv.next = add nuw nsw i32 %iv, 1 |
| ; CHECK-NEXT: IR %exitcond = icmp eq i32 %iv.next, %n |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] |
| %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] |
| %arrayidx = getelementptr inbounds i32, ptr %x, i32 %iv |
| %load0 = load i32, ptr %arrayidx, align 4 |
| %conv0 = zext i32 %load0 to i64 |
| %rdx.next = add nsw i64 %rdx, %conv0 |
| %iv.next = add nuw nsw i32 %iv, 1 |
| %exitcond = icmp eq i32 %iv.next, %n |
| br i1 %exitcond, label %exit, label %loop |
| |
| exit: |
| %r.0.lcssa = phi i64 [ %rdx.next, %loop ] |
| ret i64 %r.0.lcssa |
| } |
| |
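| ; Test printing a multiply-accumulate reduction (reduce.add of a mul) as a single VPExpression. |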
| define i64 @print_mulacc(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) { |
| ; CHECK-LABEL: 'print_mulacc' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: Live-in ir<%n> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%rdx> = phi vp<[[VP3]]>, vp<[[VP8:%[0-9]+]]> |
| ; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = SCALAR-STEPS vp<[[VP4]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%x>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx> |
| ; CHECK-NEXT: WIDEN ir<%load0> = load vp<[[VP6]]> |
| ; CHECK-NEXT: CLONE ir<%arrayidx1> = getelementptr inbounds ir<%y>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx1> |
| ; CHECK-NEXT: WIDEN ir<%load1> = load vp<[[VP7]]> |
| ; CHECK-NEXT: EXPRESSION vp<[[VP8]]> = ir<%rdx> + reduce.add (mul nsw ir<%load0>, ir<%load1>) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP10:%[0-9]+]]> = compute-reduction-result (add, in-loop) vp<[[VP8]]> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%n>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %r.0.lcssa = phi i64 [ %rdx.next, %loop ] (extra operand: vp<[[VP10]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP2]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP10]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %arrayidx = getelementptr inbounds i64, ptr %x, i32 %iv |
| ; CHECK-NEXT: IR %load0 = load i64, ptr %arrayidx, align 4 |
| ; CHECK-NEXT: IR %arrayidx1 = getelementptr inbounds i64, ptr %y, i32 %iv |
| ; CHECK-NEXT: IR %load1 = load i64, ptr %arrayidx1, align 4 |
| ; CHECK-NEXT: IR %mul = mul nsw i64 %load0, %load1 |
| ; CHECK-NEXT: IR %rdx.next = add nsw i64 %rdx, %mul |
| ; CHECK-NEXT: IR %iv.next = add nuw nsw i32 %iv, 1 |
| ; CHECK-NEXT: IR %exitcond = icmp eq i32 %iv.next, %n |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] |
| %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] |
| %arrayidx = getelementptr inbounds i64, ptr %x, i32 %iv |
| %load0 = load i64, ptr %arrayidx, align 4 |
| %arrayidx1 = getelementptr inbounds i64, ptr %y, i32 %iv |
| %load1 = load i64, ptr %arrayidx1, align 4 |
| %mul = mul nsw i64 %load0, %load1 |
| %rdx.next = add nsw i64 %rdx, %mul |
| %iv.next = add nuw nsw i32 %iv, 1 |
| %exitcond = icmp eq i32 %iv.next, %n |
| br i1 %exitcond, label %exit, label %loop |
| |
| exit: |
| %r.0.lcssa = phi i64 [ %rdx.next, %loop ] |
| ret i64 %r.0.lcssa |
| } |
| |
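| ; Multiply-accumulate where both mul operands are sign-extended loads; the extends are folded into the expression. |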
| define i64 @print_mulacc_extended(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) { |
| ; CHECK-LABEL: 'print_mulacc_extended' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: Live-in ir<%n> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%rdx> = phi vp<[[VP3]]>, vp<[[VP8:%[0-9]+]]> |
| ; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = SCALAR-STEPS vp<[[VP4]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%x>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx> |
| ; CHECK-NEXT: WIDEN ir<%load0> = load vp<[[VP6]]> |
| ; CHECK-NEXT: CLONE ir<%arrayidx1> = getelementptr inbounds ir<%y>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx1> |
| ; CHECK-NEXT: WIDEN ir<%load1> = load vp<[[VP7]]> |
| ; CHECK-NEXT: EXPRESSION vp<[[VP8]]> = ir<%rdx> + reduce.add (mul nsw (ir<%load0> sext to i64), (ir<%load1> sext to i64)) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP10:%[0-9]+]]> = compute-reduction-result (add, in-loop) vp<[[VP8]]> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%n>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %r.0.lcssa = phi i64 [ %rdx.next, %loop ] (extra operand: vp<[[VP10]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP2]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP10]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %arrayidx = getelementptr inbounds i16, ptr %x, i32 %iv |
| ; CHECK-NEXT: IR %load0 = load i16, ptr %arrayidx, align 4 |
| ; CHECK-NEXT: IR %arrayidx1 = getelementptr inbounds i16, ptr %y, i32 %iv |
| ; CHECK-NEXT: IR %load1 = load i16, ptr %arrayidx1, align 4 |
| ; CHECK-NEXT: IR %conv0 = sext i16 %load0 to i32 |
| ; CHECK-NEXT: IR %conv1 = sext i16 %load1 to i32 |
| ; CHECK-NEXT: IR %mul = mul nsw i32 %conv0, %conv1 |
| ; CHECK-NEXT: IR %conv = sext i32 %mul to i64 |
| ; CHECK-NEXT: IR %rdx.next = add nsw i64 %rdx, %conv |
| ; CHECK-NEXT: IR %iv.next = add nuw nsw i32 %iv, 1 |
| ; CHECK-NEXT: IR %exitcond = icmp eq i32 %iv.next, %n |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] |
| %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] |
| %arrayidx = getelementptr inbounds i16, ptr %x, i32 %iv |
| %load0 = load i16, ptr %arrayidx, align 4 |
| %arrayidx1 = getelementptr inbounds i16, ptr %y, i32 %iv |
| %load1 = load i16, ptr %arrayidx1, align 4 |
| %conv0 = sext i16 %load0 to i32 |
| %conv1 = sext i16 %load1 to i32 |
| %mul = mul nsw i32 %conv0, %conv1 |
| %conv = sext i32 %mul to i64 |
| %rdx.next = add nsw i64 %rdx, %conv |
| %iv.next = add nuw nsw i32 %iv, 1 |
| %exitcond = icmp eq i32 %iv.next, %n |
| br i1 %exitcond, label %exit, label %loop |
| |
| exit: |
| %r.0.lcssa = phi i64 [ %rdx.next, %loop ] |
| ret i64 %r.0.lcssa |
| } |
| |
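| ; Test printing an extended sub reduction; the zext is folded into a reduce.sub expression. |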
| define i64 @print_extended_sub_reduction(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) { |
| ; CHECK-LABEL: 'print_extended_sub_reduction' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: Live-in ir<%n> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%rdx> = phi vp<[[VP3]]>, vp<[[VP7:%[0-9]+]]> |
| ; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = SCALAR-STEPS vp<[[VP4]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%x>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx> |
| ; CHECK-NEXT: WIDEN ir<%load0> = load vp<[[VP6]]> |
| ; CHECK-NEXT: EXPRESSION vp<[[VP7]]> = ir<%rdx> + reduce.sub (ir<%load0> zext to i64) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = compute-reduction-result (sub, in-loop) vp<[[VP7]]> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%n>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %r.0.lcssa = phi i64 [ %rdx.next, %loop ] (extra operand: vp<[[VP9]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP2]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %arrayidx = getelementptr inbounds i32, ptr %x, i32 %iv |
| ; CHECK-NEXT: IR %load0 = load i32, ptr %arrayidx, align 4 |
| ; CHECK-NEXT: IR %conv0 = zext i32 %load0 to i64 |
| ; CHECK-NEXT: IR %rdx.next = sub nsw i64 %rdx, %conv0 |
| ; CHECK-NEXT: IR %iv.next = add nuw nsw i32 %iv, 1 |
| ; CHECK-NEXT: IR %exitcond = icmp eq i32 %iv.next, %n |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] |
| %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] |
| %arrayidx = getelementptr inbounds i32, ptr %x, i32 %iv |
| %load0 = load i32, ptr %arrayidx, align 4 |
| %conv0 = zext i32 %load0 to i64 |
| %rdx.next = sub nsw i64 %rdx, %conv0 |
| %iv.next = add nuw nsw i32 %iv, 1 |
| %exitcond = icmp eq i32 %iv.next, %n |
| br i1 %exitcond, label %exit, label %loop |
| |
| exit: |
| %r.0.lcssa = phi i64 [ %rdx.next, %loop ] |
| ret i64 %r.0.lcssa |
| } |
| |
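| ; Multiply-accumulate of zero-extended i8 loads feeding a sub reduction. |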
| define i32 @print_mulacc_sub(ptr %a, ptr %b) { |
| ; CHECK-LABEL: 'print_mulacc_sub' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: Live-in ir<1024> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%accum> = phi vp<[[VP3]]>, vp<[[VP8:%[0-9]+]]> |
| ; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = SCALAR-STEPS vp<[[VP4]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.a> |
| ; CHECK-NEXT: WIDEN ir<%load.a> = load vp<[[VP6]]> |
| ; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer ir<%gep.b> |
| ; CHECK-NEXT: WIDEN ir<%load.b> = load vp<[[VP7]]> |
| ; CHECK-NEXT: EXPRESSION vp<[[VP8]]> = ir<%accum> + reduce.sub (mul (ir<%load.b> zext to i32), (ir<%load.a> zext to i32)) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP10:%[0-9]+]]> = compute-reduction-result (sub, in-loop) vp<[[VP8]]> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<1024>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %add.lcssa = phi i32 [ %add, %loop ] (extra operand: vp<[[VP10]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP2]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP10]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %accum = phi i32 [ 0, %entry ], [ %add, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %gep.a = getelementptr i8, ptr %a, i64 %iv |
| ; CHECK-NEXT: IR %load.a = load i8, ptr %gep.a, align 1 |
| ; CHECK-NEXT: IR %ext.a = zext i8 %load.a to i32 |
| ; CHECK-NEXT: IR %gep.b = getelementptr i8, ptr %b, i64 %iv |
| ; CHECK-NEXT: IR %load.b = load i8, ptr %gep.b, align 1 |
| ; CHECK-NEXT: IR %ext.b = zext i8 %load.b to i32 |
| ; CHECK-NEXT: IR %mul = mul i32 %ext.b, %ext.a |
| ; CHECK-NEXT: IR %add = sub i32 %accum, %mul |
| ; CHECK-NEXT: IR %iv.next = add i64 %iv, 1 |
| ; CHECK-NEXT: IR %exitcond.not = icmp eq i64 %iv.next, 1024 |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] |
| %accum = phi i32 [ 0, %entry ], [ %add, %loop ] |
| %gep.a = getelementptr i8, ptr %a, i64 %iv |
| %load.a = load i8, ptr %gep.a, align 1 |
| %ext.a = zext i8 %load.a to i32 |
| %gep.b = getelementptr i8, ptr %b, i64 %iv |
| %load.b = load i8, ptr %gep.b, align 1 |
| %ext.b = zext i8 %load.b to i32 |
| %mul = mul i32 %ext.b, %ext.a |
| %add = sub i32 %accum, %mul |
| %iv.next = add i64 %iv, 1 |
| %exitcond.not = icmp eq i64 %iv.next, 1024 |
| br i1 %exitcond.not, label %exit, label %loop |
| |
| exit: |
| ret i32 %add |
| } |
| |
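| ; Negated multiply-accumulate: the 'sub 0, mul' stays inside the reduce.add expression. |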
| define i32 @print_mulacc_negated(ptr %a, ptr %b) { |
| ; CHECK-LABEL: 'print_mulacc_negated' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: Live-in ir<1024> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%accum> = phi vp<[[VP3]]>, vp<[[VP8:%[0-9]+]]> |
| ; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = SCALAR-STEPS vp<[[VP4]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.a> |
| ; CHECK-NEXT: WIDEN ir<%load.a> = load vp<[[VP6]]> |
| ; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer ir<%gep.b> |
| ; CHECK-NEXT: WIDEN ir<%load.b> = load vp<[[VP7]]> |
| ; CHECK-NEXT: EXPRESSION vp<[[VP8]]> = ir<%accum> + reduce.add (sub (0, mul (ir<%load.b> zext to i32), (ir<%load.a> zext to i32))) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP10:%[0-9]+]]> = compute-reduction-result (add, in-loop) vp<[[VP8]]> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<1024>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %add.lcssa = phi i32 [ %add, %loop ] (extra operand: vp<[[VP10]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP2]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP10]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %accum = phi i32 [ 0, %entry ], [ %add, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %gep.a = getelementptr i8, ptr %a, i64 %iv |
| ; CHECK-NEXT: IR %load.a = load i8, ptr %gep.a, align 1 |
| ; CHECK-NEXT: IR %ext.a = zext i8 %load.a to i32 |
| ; CHECK-NEXT: IR %gep.b = getelementptr i8, ptr %b, i64 %iv |
| ; CHECK-NEXT: IR %load.b = load i8, ptr %gep.b, align 1 |
| ; CHECK-NEXT: IR %ext.b = zext i8 %load.b to i32 |
| ; CHECK-NEXT: IR %mul = mul i32 %ext.b, %ext.a |
| ; CHECK-NEXT: IR %sub = sub i32 0, %mul |
| ; CHECK-NEXT: IR %add = add i32 %accum, %sub |
| ; CHECK-NEXT: IR %iv.next = add i64 %iv, 1 |
| ; CHECK-NEXT: IR %exitcond.not = icmp eq i64 %iv.next, 1024 |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] |
| %accum = phi i32 [ 0, %entry ], [ %add, %loop ] |
| %gep.a = getelementptr i8, ptr %a, i64 %iv |
| %load.a = load i8, ptr %gep.a, align 1 |
| %ext.a = zext i8 %load.a to i32 |
| %gep.b = getelementptr i8, ptr %b, i64 %iv |
| %load.b = load i8, ptr %gep.b, align 1 |
| %ext.b = zext i8 %load.b to i32 |
| %mul = mul i32 %ext.b, %ext.a |
| %sub = sub i32 0, %mul |
| %add = add i32 %accum, %sub |
| %iv.next = add i64 %iv, 1 |
| %exitcond.not = icmp eq i64 %iv.next, 1024 |
| br i1 %exitcond.not, label %exit, label %loop |
| |
| exit: |
| ret i32 %add |
| } |
| |
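| ; Sign-extended multiply-accumulate feeding a sub reduction. |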
| define i64 @print_mulacc_sub_extended(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) { |
| ; CHECK-LABEL: 'print_mulacc_sub_extended' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: Live-in ir<%n> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%rdx> = phi vp<[[VP3]]>, vp<[[VP8:%[0-9]+]]> |
| ; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = SCALAR-STEPS vp<[[VP4]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%x>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx> |
| ; CHECK-NEXT: WIDEN ir<%load0> = load vp<[[VP6]]> |
| ; CHECK-NEXT: CLONE ir<%arrayidx1> = getelementptr inbounds ir<%y>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx1> |
| ; CHECK-NEXT: WIDEN ir<%load1> = load vp<[[VP7]]> |
| ; CHECK-NEXT: EXPRESSION vp<[[VP8]]> = ir<%rdx> + reduce.sub (mul nsw (ir<%load0> sext to i64), (ir<%load1> sext to i64)) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP10:%[0-9]+]]> = compute-reduction-result (sub, in-loop) vp<[[VP8]]> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%n>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %r.0.lcssa = phi i64 [ %rdx.next, %loop ] (extra operand: vp<[[VP10]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP2]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP10]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %arrayidx = getelementptr inbounds i16, ptr %x, i32 %iv |
| ; CHECK-NEXT: IR %load0 = load i16, ptr %arrayidx, align 4 |
| ; CHECK-NEXT: IR %arrayidx1 = getelementptr inbounds i16, ptr %y, i32 %iv |
| ; CHECK-NEXT: IR %load1 = load i16, ptr %arrayidx1, align 4 |
| ; CHECK-NEXT: IR %conv0 = sext i16 %load0 to i32 |
| ; CHECK-NEXT: IR %conv1 = sext i16 %load1 to i32 |
| ; CHECK-NEXT: IR %mul = mul nsw i32 %conv0, %conv1 |
| ; CHECK-NEXT: IR %conv = sext i32 %mul to i64 |
| ; CHECK-NEXT: IR %rdx.next = sub nsw i64 %rdx, %conv |
| ; CHECK-NEXT: IR %iv.next = add nuw nsw i32 %iv, 1 |
| ; CHECK-NEXT: IR %exitcond = icmp eq i32 %iv.next, %n |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] |
| %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] |
| %arrayidx = getelementptr inbounds i16, ptr %x, i32 %iv |
| %load0 = load i16, ptr %arrayidx, align 4 |
| %arrayidx1 = getelementptr inbounds i16, ptr %y, i32 %iv |
| %load1 = load i16, ptr %arrayidx1, align 4 |
| %conv0 = sext i16 %load0 to i32 |
| %conv1 = sext i16 %load1 to i32 |
| %mul = mul nsw i32 %conv0, %conv1 |
| %conv = sext i32 %mul to i64 |
| %rdx.next = sub nsw i64 %rdx, %conv |
| %iv.next = add nuw nsw i32 %iv, 1 |
| %exitcond = icmp eq i32 %iv.next, %n |
| br i1 %exitcond, label %exit, label %loop |
| |
| exit: |
| %r.0.lcssa = phi i64 [ %rdx.next, %loop ] |
| ret i64 %r.0.lcssa |
| } |
| |
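| ; Multiply-accumulate where the same sign-extended load is used for both operands of the mul. |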
| define i64 @print_mulacc_duplicate_extends(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) { |
| ; CHECK-LABEL: 'print_mulacc_duplicate_extends' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: Live-in ir<%n> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%rdx> = phi vp<[[VP3]]>, vp<[[VP7:%[0-9]+]]> |
| ; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = SCALAR-STEPS vp<[[VP4]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%x>, vp<[[VP5]]> |
| ; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer inbounds ir<%arrayidx> |
| ; CHECK-NEXT: WIDEN ir<%load0> = load vp<[[VP6]]> |
| ; CHECK-NEXT: EXPRESSION vp<[[VP7]]> = ir<%rdx> + reduce.sub (mul nsw (ir<%load0> sext to i64), (ir<%load0> sext to i64)) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = compute-reduction-result (sub, in-loop) vp<[[VP7]]> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%n>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %r.0.lcssa = phi i64 [ %rdx.next, %loop ] (extra operand: vp<[[VP9]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP2]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %arrayidx = getelementptr inbounds i16, ptr %x, i32 %iv |
| ; CHECK-NEXT: IR %load0 = load i16, ptr %arrayidx, align 4 |
| ; CHECK-NEXT: IR %conv0 = sext i16 %load0 to i32 |
| ; CHECK-NEXT: IR %mul = mul nsw i32 %conv0, %conv0 |
| ; CHECK-NEXT: IR %conv = sext i32 %mul to i64 |
| ; CHECK-NEXT: IR %rdx.next = sub nsw i64 %rdx, %conv |
| ; CHECK-NEXT: IR %iv.next = add nuw nsw i32 %iv, 1 |
| ; CHECK-NEXT: IR %exitcond = icmp eq i32 %iv.next, %n |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] |
| %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] |
| %arrayidx = getelementptr inbounds i16, ptr %x, i32 %iv |
| %load0 = load i16, ptr %arrayidx, align 4 |
| %conv0 = sext i16 %load0 to i32 |
| %mul = mul nsw i32 %conv0, %conv0 |
| %conv = sext i32 %mul to i64 |
| %rdx.next = sub nsw i64 %rdx, %conv |
| %iv.next = add nuw nsw i32 %iv, 1 |
| %exitcond = icmp eq i32 %iv.next, %n |
| br i1 %exitcond, label %exit, label %loop |
| |
| exit: |
| %r.0.lcssa = phi i64 [ %rdx.next, %loop ] |
| ret i64 %r.0.lcssa |
| } |
| |
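| ; Multiply-accumulate with a constant mul operand; 63 can be treated as zero-extended from i8, so it is folded into the expression as a narrow constant. |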
| define i32 @print_mulacc_extended_const(ptr %start, ptr %end) { |
| ; CHECK-LABEL: 'print_mulacc_extended_const' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: vp<[[VP3:%[0-9]+]]> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = DERIVED-IV ir<%start> + vp<[[VP2]]> * ir<1> |
| ; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi vp<[[VP5]]>, vp<[[VP9:%[0-9]+]]> |
| ; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = SCALAR-STEPS vp<[[VP6]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<[[VP7]]> |
| ; CHECK-NEXT: vp<[[VP8:%[0-9]+]]> = vector-pointer vp<%next.gep> |
| ; CHECK-NEXT: WIDEN ir<%l> = load vp<[[VP8]]> |
| ; CHECK-NEXT: EXPRESSION vp<[[VP9]]> = ir<%red> + reduce.add (mul (ir<%l> zext to i32), (ir<63> zext to i32)) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP6]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP11:%[0-9]+]]> = compute-reduction-result (add, in-loop) vp<[[VP9]]> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %red.next.lcssa = phi i32 [ %red.next, %loop ] (extra operand: vp<[[VP11]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP4]]>, middle.block ], [ ir<%start>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP11]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1 |
| ; CHECK-NEXT: IR %l.ext = zext i8 %l to i32 |
| ; CHECK-NEXT: IR %mul = mul i32 %l.ext, 63 |
| ; CHECK-NEXT: IR %red.next = add i32 %red, %mul |
| ; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 |
| ; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] |
| %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] |
| %l = load i8, ptr %ptr.iv, align 1 |
| %l.ext = zext i8 %l to i32 |
| %mul = mul i32 %l.ext, 63 |
| %red.next = add i32 %red, %mul |
| %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 |
| %ec = icmp eq ptr %ptr.iv, %end |
| br i1 %ec, label %exit, label %loop |
| |
| exit: |
| ret i32 %red.next |
| } |
| |
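| ; Variant with the constant on the left-hand side of the mul; the zext of the load remains a separate WIDEN-CAST recipe. |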
| define i32 @print_mulacc_extended_const_lhs(ptr %start, ptr %end) { |
| ; CHECK-LABEL: 'print_mulacc_extended_const_lhs' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: vp<[[VP3:%[0-9]+]]> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = DERIVED-IV ir<%start> + vp<[[VP2]]> * ir<1> |
| ; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi vp<[[VP5]]>, vp<[[VP9:%[0-9]+]]> |
| ; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = SCALAR-STEPS vp<[[VP6]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<[[VP7]]> |
| ; CHECK-NEXT: vp<[[VP8:%[0-9]+]]> = vector-pointer vp<%next.gep> |
| ; CHECK-NEXT: WIDEN ir<%l> = load vp<[[VP8]]> |
| ; CHECK-NEXT: WIDEN-CAST ir<%l.ext> = zext ir<%l> to i32 |
| ; CHECK-NEXT: EXPRESSION vp<[[VP9]]> = ir<%red> + reduce.add (mul ir<63>, ir<%l.ext>) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP6]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP11:%[0-9]+]]> = compute-reduction-result (add, in-loop) vp<[[VP9]]> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %red.next.lcssa = phi i32 [ %red.next, %loop ] (extra operand: vp<[[VP11]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP4]]>, middle.block ], [ ir<%start>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP11]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1 |
| ; CHECK-NEXT: IR %l.ext = zext i8 %l to i32 |
| ; CHECK-NEXT: IR %mul = mul i32 63, %l.ext |
| ; CHECK-NEXT: IR %red.next = add i32 %red, %mul |
| ; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 |
| ; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] |
| %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] |
| %l = load i8, ptr %ptr.iv, align 1 |
| %l.ext = zext i8 %l to i32 |
| %mul = mul i32 63, %l.ext |
| %red.next = add i32 %red, %mul |
| %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 |
| %ec = icmp eq ptr %ptr.iv, %end |
| br i1 %ec, label %exit, label %loop |
| |
| exit: |
| ret i32 %red.next |
| } |
| |
| ; Constants >= 128 do not fit in the signed i8 range, so 128 cannot be treated as sign-extended from i8 and the expression should not extend it. |
| define i32 @print_mulacc_not_extended_const(ptr %start, ptr %end) { |
| ; CHECK-LABEL: 'print_mulacc_not_extended_const' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: vp<[[VP3:%[0-9]+]]> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = DERIVED-IV ir<%start> + vp<[[VP2]]> * ir<1> |
| ; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi vp<[[VP5]]>, vp<[[VP9:%[0-9]+]]> |
| ; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = SCALAR-STEPS vp<[[VP6]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<[[VP7]]> |
| ; CHECK-NEXT: vp<[[VP8:%[0-9]+]]> = vector-pointer vp<%next.gep> |
| ; CHECK-NEXT: WIDEN ir<%l> = load vp<[[VP8]]> |
| ; CHECK-NEXT: WIDEN-CAST ir<%l.ext> = sext ir<%l> to i32 |
| ; CHECK-NEXT: EXPRESSION vp<[[VP9]]> = ir<%red> + reduce.add (mul ir<%l.ext>, ir<128>) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP6]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP11:%[0-9]+]]> = compute-reduction-result (add, in-loop) vp<[[VP9]]> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %red.next.lcssa = phi i32 [ %red.next, %loop ] (extra operand: vp<[[VP11]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP4]]>, middle.block ], [ ir<%start>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP11]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1 |
| ; CHECK-NEXT: IR %l.ext = sext i8 %l to i32 |
| ; CHECK-NEXT: IR %mul = mul i32 %l.ext, 128 |
| ; CHECK-NEXT: IR %red.next = add i32 %red, %mul |
| ; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 |
| ; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] |
| %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] |
| %l = load i8, ptr %ptr.iv, align 1 |
| %l.ext = sext i8 %l to i32 |
| %mul = mul i32 %l.ext, 128 |
| %red.next = add i32 %red, %mul |
| %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 |
| %ec = icmp eq ptr %ptr.iv, %end |
| br i1 %ec, label %exit, label %loop |
| |
| exit: |
| %red.next.lcssa = phi i32 [ %red.next, %loop ] |
| ret i32 %red.next.lcssa |
| } |
| |
| define i64 @print_ext_mulacc_extended_const(ptr %start, ptr %end) { |
| ; CHECK-LABEL: 'print_ext_mulacc_extended_const' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: vp<[[VP3:%[0-9]+]]> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = DERIVED-IV ir<%start> + vp<[[VP2]]> * ir<1> |
| ; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi vp<[[VP5]]>, vp<[[VP9:%[0-9]+]]> |
| ; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = SCALAR-STEPS vp<[[VP6]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<[[VP7]]> |
| ; CHECK-NEXT: vp<[[VP8:%[0-9]+]]> = vector-pointer vp<%next.gep> |
| ; CHECK-NEXT: WIDEN ir<%l> = load vp<[[VP8]]> |
| ; CHECK-NEXT: EXPRESSION vp<[[VP9]]> = ir<%red> + reduce.add (mul (ir<%l> zext to i64), (ir<63> zext to i64)) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP6]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP11:%[0-9]+]]> = compute-reduction-result (add, in-loop) vp<[[VP9]]> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %red.next.lcssa = phi i64 [ %red.next, %loop ] (extra operand: vp<[[VP11]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP4]]>, middle.block ], [ ir<%start>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP11]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %red = phi i64 [ 0, %entry ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1 |
| ; CHECK-NEXT: IR %l.ext = zext i8 %l to i32 |
| ; CHECK-NEXT: IR %mul = mul i32 %l.ext, 63 |
| ; CHECK-NEXT: IR %mul.ext = zext i32 %mul to i64 |
| ; CHECK-NEXT: IR %red.next = add i64 %red, %mul.ext |
| ; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 |
| ; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] |
| %red = phi i64 [ 0, %entry ], [ %red.next, %loop ] |
| %l = load i8, ptr %ptr.iv, align 1 |
| %l.ext = zext i8 %l to i32 |
| %mul = mul i32 %l.ext, 63 |
| %mul.ext = zext i32 %mul to i64 |
| %red.next = add i64 %red, %mul.ext |
| %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 |
| %ec = icmp eq ptr %ptr.iv, %end |
| br i1 %ec, label %exit, label %loop |
| |
| exit: |
| ret i64 %red.next |
| } |
| |
| ; Constants >= 128 cannot be treated as sign-extended from i8, so the expression should not fold 128 as an extended constant. |
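| ; In the VPlan below, only the outer sext is folded into the abstract expression: the multiply by 128 stays a |
| ; separate recipe (printed as a shl by 7) and the reduce.add operates on its sign-extended result instead of a |
| ; narrowed mul. |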
| define i64 @print_ext_mulacc_not_extended_const(ptr %start, ptr %end) { |
| ; CHECK-LABEL: 'print_ext_mulacc_not_extended_const' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: vp<[[VP3:%[0-9]+]]> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = DERIVED-IV ir<%start> + vp<[[VP2]]> * ir<1> |
| ; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi vp<[[VP5]]>, vp<[[VP10:%[0-9]+]]> |
| ; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = SCALAR-STEPS vp<[[VP6]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: EMIT vp<%next.gep> = ptradd ir<%start>, vp<[[VP7]]> |
| ; CHECK-NEXT: vp<[[VP8:%[0-9]+]]> = vector-pointer vp<%next.gep> |
| ; CHECK-NEXT: WIDEN ir<%l> = load vp<[[VP8]]> |
| ; CHECK-NEXT: WIDEN-CAST ir<%l.ext> = sext ir<%l> to i32 |
| ; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = shl ir<%l.ext>, ir<7> |
| ; CHECK-NEXT: EXPRESSION vp<[[VP10]]> = ir<%red> + reduce.add (vp<[[VP9]]> sext to i64) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP6]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP12:%[0-9]+]]> = compute-reduction-result (add, in-loop) vp<[[VP10]]> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %red.next.lcssa = phi i64 [ %red.next, %loop ] (extra operand: vp<[[VP12]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP4]]>, middle.block ], [ ir<%start>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP12]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %red = phi i64 [ 0, %entry ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %l = load i8, ptr %ptr.iv, align 1 |
| ; CHECK-NEXT: IR %l.ext = sext i8 %l to i32 |
| ; CHECK-NEXT: IR %mul = mul i32 %l.ext, 128 |
| ; CHECK-NEXT: IR %mul.ext = sext i32 %mul to i64 |
| ; CHECK-NEXT: IR %red.next = add i64 %red, %mul.ext |
| ; CHECK-NEXT: IR %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 |
| ; CHECK-NEXT: IR %ec = icmp eq ptr %ptr.iv, %end |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] |
| %red = phi i64 [ 0, %entry ], [ %red.next, %loop ] |
| %l = load i8, ptr %ptr.iv, align 1 |
| %l.ext = sext i8 %l to i32 |
| %mul = mul i32 %l.ext, 128 |
| %mul.ext = sext i32 %mul to i64 |
| %red.next = add i64 %red, %mul.ext |
| %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 |
| %ec = icmp eq ptr %ptr.iv, %end |
| br i1 %ec, label %exit, label %loop |
| |
| exit: |
| %red.next.lcssa = phi i64 [ %red.next, %loop ] |
| ret i64 %red.next.lcssa |
| } |
| |
| ; This reduce.add(ext(mul(ext(A), ext(B)))) can't be turned into an |
| ; ExtMulAccReduction VPExpressionRecipe, since the mul has two users. |
| ; It can, however, be turned into an ExtendedReduction, since that fold |
| ; does not modify the mul's operands. |
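| ; Sketch (illustrative comment only, ignored by FileCheck): an ExtMulAccReduction would rewrite the mul to work |
| ; on operands widened to i64, as in the reduce.add (mul (... zext to i64), (... zext to i64)) form printed for |
| ; print_ext_mulacc_extended_const above; because %second_use also reads the i32 %mul, the plan below keeps |
| ; ir<%mul> intact and folds only the outer extend: reduce.add (ir<%mul> zext to i64). |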
| define i64 @print_ext_mul_two_uses(i64 %n, ptr %a, i16 %b, i32 %c) { |
| ; CHECK-LABEL: 'print_ext_mul_two_uses' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: vp<[[VP2:%[0-9]+]]> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: EMIT vp<[[VP2]]> = EXPAND SCEV (1 + %n) |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> |
| ; CHECK-NEXT: WIDEN-CAST ir<%conv> = sext ir<%b> to i32 |
| ; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%conv>, ir<%conv> |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%res2> = phi vp<[[VP3]]>, vp<[[VP5:%[0-9]+]]> |
| ; CHECK-NEXT: CLONE ir<%load> = load ir<%a> |
| ; CHECK-NEXT: EXPRESSION vp<[[VP5]]> = ir<%res2> + reduce.add (ir<%mul> zext to i64) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP0]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP1]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: WIDEN-CAST ir<%load.ext> = sext ir<%load> to i32 |
| ; CHECK-NEXT: WIDEN-CAST ir<%load.ext.ext> = sext ir<%load.ext> to i64 |
| ; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = compute-reduction-result (add, in-loop) vp<[[VP5]]> |
| ; CHECK-NEXT: EMIT vp<[[VP8:%[0-9]+]]> = extract-last-part ir<%load.ext.ext> |
| ; CHECK-NEXT: EMIT vp<%vector.recur.extract> = extract-last-lane vp<[[VP8]]> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP2]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %add.lcssa = phi i64 [ %add, %loop ] (extra operand: vp<[[VP7]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP1]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%scalar.recur.init> = phi [ vp<%vector.recur.extract>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP7]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %res1 = phi i64 [ 0, %entry ], [ %load.ext.ext, %loop ] (extra operand: vp<%scalar.recur.init> from scalar.ph) |
| ; CHECK-NEXT: IR %res2 = phi i64 [ 0, %entry ], [ %add, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %load = load i16, ptr %a, align 2 |
| ; CHECK-NEXT: IR %iv.next = add i64 %iv, 1 |
| ; CHECK-NEXT: IR %conv = sext i16 %b to i32 |
| ; CHECK-NEXT: IR %mul = mul i32 %conv, %conv |
| ; CHECK-NEXT: IR %mul.ext = zext i32 %mul to i64 |
| ; CHECK-NEXT: IR %add = add i64 %res2, %mul.ext |
| ; CHECK-NEXT: IR %second_use = or i32 %mul, %c |
| ; CHECK-NEXT: IR %load.ext = sext i16 %load to i32 |
| ; CHECK-NEXT: IR %load.ext.ext = sext i32 %load.ext to i64 |
| ; CHECK-NEXT: IR %exitcond740.not = icmp eq i64 %iv, %n |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] |
| %res1 = phi i64 [ 0, %entry ], [ %load.ext.ext, %loop ] |
| %res2 = phi i64 [ 0, %entry ], [ %add, %loop ] |
| %load = load i16, ptr %a, align 2 |
| %iv.next = add i64 %iv, 1 |
| %conv = sext i16 %b to i32 |
| %mul = mul i32 %conv, %conv |
| %mul.ext = zext i32 %mul to i64 |
| %add = add i64 %res2, %mul.ext |
| %second_use = or i32 %mul, %c ; dead value, but it gives %mul a second user, which is all the test needs |
| %load.ext = sext i16 %load to i32 |
| %load.ext.ext = sext i32 %load.ext to i64 |
| %exitcond740.not = icmp eq i64 %iv, %n |
| br i1 %exitcond740.not, label %exit, label %loop |
| |
| exit: |
| ret i64 %add |
| } |
| |
| define i32 @print_umax_reduction(ptr %y) { |
| ; CHECK-LABEL: 'print_umax_reduction' |
| ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { |
| ; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF |
| ; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF |
| ; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count |
| ; CHECK-NEXT: Live-in ir<100> = original trip-count |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<entry>: |
| ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: vector.ph: |
| ; CHECK-NEXT: Successor(s): vector loop |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: <x1> vector loop: { |
| ; CHECK-NEXT: vector.body: |
| ; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> |
| ; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi ir<0>, ir<%red.next> |
| ; CHECK-NEXT: vp<[[VP4:%[0-9]+]]> = SCALAR-STEPS vp<[[VP3]]>, ir<1>, vp<[[VP0]]> |
| ; CHECK-NEXT: CLONE ir<%gep> = getelementptr inbounds ir<%y>, vp<[[VP4]]> |
| ; CHECK-NEXT: vp<[[VP5:%[0-9]+]]> = vector-pointer inbounds ir<%gep> |
| ; CHECK-NEXT: WIDEN ir<%lv> = load vp<[[VP5]]> |
| ; CHECK-NEXT: WIDEN-INTRINSIC ir<%red.next> = call llvm.umax(ir<%lv>, ir<%red>) |
| ; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> |
| ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: Successor(s): middle.block |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: middle.block: |
| ; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = compute-reduction-result (umax) ir<%red.next> |
| ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<100>, vp<[[VP2]]> |
| ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> |
| ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<exit>: |
| ; CHECK-NEXT: IR %red.next.lcssa = phi i32 [ %red.next, %loop ] (extra operand: vp<[[VP7]]> from middle.block) |
| ; CHECK-NEXT: No successors |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: scalar.ph: |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP2]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<[[VP7]]>, middle.block ], [ ir<0>, ir-bb<entry> ] |
| ; CHECK-NEXT: Successor(s): ir-bb<loop> |
| ; CHECK-EMPTY: |
| ; CHECK-NEXT: ir-bb<loop>: |
| ; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) |
| ; CHECK-NEXT: IR %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) |
| ; CHECK-NEXT: IR %gep = getelementptr inbounds i32, ptr %y, i64 %iv |
| ; CHECK-NEXT: IR %lv = load i32, ptr %gep, align 4 |
| ; CHECK-NEXT: IR %red.next = call i32 @llvm.umax.i32(i32 %lv, i32 %red) |
| ; CHECK-NEXT: IR %iv.next = add i64 %iv, 1 |
| ; CHECK-NEXT: IR %ec = icmp eq i64 %iv.next, 100 |
| ; CHECK-NEXT: No successors |
| ; CHECK-NEXT: } |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] |
| %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] |
| %gep = getelementptr inbounds i32, ptr %y, i64 %iv |
| %lv = load i32, ptr %gep, align 4 |
| %red.next = call i32 @llvm.umax(i32 %lv, i32 %red) |
| %iv.next = add i64 %iv, 1 |
| %ec = icmp eq i64 %iv.next, 100 |
| br i1 %ec, label %exit, label %loop |
| |
| exit: |
| ret i32 %red.next |
| } |