| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6 |
| ; RUN: opt -passes="loop-vectorize" -mtriple=arm64-apple-macosx -S %s | FileCheck %s |
| |
; Check that an integer 'or' reduction is vectorized for arm64: VF = 4 with an
; interleave count of 2 (two <4 x i32> reduction phis, index advanced by 8 per
; vector iteration). The vector loop covers 96 of the 100 iterations and the
; scalar epilogue loop runs the remaining 4, resuming the reduction from the
; vector.reduce.or of the combined partial sums.
; NOTE(review): the #0 attribute group on @reduction has no visible
; `attributes #0 = { ... }` definition in this chunk -- if none exists
; elsewhere in the file, the reference is dead and could be dropped; if one
; does exist (e.g. target features), it affects the vectorizer's decisions.
define i64 @reduction(i64 %arg) #0 {
; CHECK-LABEL: define i64 @reduction(
; CHECK-SAME: i64 [[ARG:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP1:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[STEP_ADD:%.*]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
; CHECK-NEXT: [[TMP5]] = or <4 x i32> [[VEC_PHI]], splat (i32 1)
; CHECK-NEXT: [[TMP1]] = or <4 x i32> [[VEC_PHI2]], splat (i32 1)
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[STEP_ADD]], splat (i64 4)
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96
; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX1:%.*]] = or <4 x i32> [[TMP1]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[BIN_RDX1]])
; CHECK-NEXT: br label %[[SCALAR_PH:.*]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ 96, %[[SCALAR_PH]] ], [ [[TMP3:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ [[TMP7]], %[[SCALAR_PH]] ], [ [[TMP2:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[TMP2]] = or i32 [[VEC_PHI1]], 1
; CHECK-NEXT: [[TMP3]] = add nsw i64 [[TMP0]], 1
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw i64 [[TMP3]], [[ARG]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[TMP3]], 100
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i64 [ [[TMP4]], %[[LOOP]] ]
; CHECK-NEXT: [[BIN_RDX:%.*]] = phi i32 [ [[TMP2]], %[[LOOP]] ]
; CHECK-NEXT: [[EXT:%.*]] = zext i32 [[BIN_RDX]] to i64
; CHECK-NEXT: [[RES:%.*]] = add i64 [[EXT]], [[MUL_LCSSA]]
; CHECK-NEXT: ret i64 [[RES]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]   ; induction variable, 0..99
  %rdx = phi i32 [ 0, %entry ], [ %rdx.next, %loop ] ; or-reduction accumulator
  %rdx.next = or i32 %rdx, 1
  %iv.next = add nsw i64 %iv, 1
  ; Additional loop-live value whose final value is used after the loop
  ; (%res below uses %mul directly; input IR is not in LCSSA form).
  %mul = mul nsw i64 %iv.next, %arg
  %cmp = icmp ne i64 %iv.next, 100
  br i1 %cmp, label %loop, label %exit

exit:
  %rdx.next.lcssa = phi i32 [ %rdx.next, %loop ]
  %ext = zext i32 %rdx.next.lcssa to i64
  ; Combine the reduction result with the last value of %mul (100 * %arg).
  %res = add i64 %ext, %mul
  ret i64 %res
}