| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 |
| ; RUN: opt -passes=loop-vectorize \ |
| ; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ |
| ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=IF-EVL-OUTLOOP |
| |
| ; RUN: opt -passes=loop-vectorize \ |
| ; RUN: -prefer-inloop-reductions \ |
| ; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ |
| ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=IF-EVL-INLOOP |
| |
| ; RUN: opt -passes=loop-vectorize \ |
| ; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ |
| ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=NO-VP-OUTLOOP |
| |
| ; RUN: opt -passes=loop-vectorize \ |
| ; RUN: -prefer-inloop-reductions \ |
| ; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ |
| ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=NO-VP-INLOOP |
| |
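; This test exercises an integer add reduction whose running value is also
; stored to memory on every iteration (an "intermediate store" to %addr).
; The vectorizer must emit runtime checks proving %a and %addr do not alias
; (vector.memcheck) and sink the store of the final reduction result out of
; the vector loop into middle.block. The four RUN lines cover EVL-based tail
; folding (IF-EVL-*) versus a scalar epilogue (NO-VP-*), each with the
; reduction kept as a wide vector phi (OUTLOOP) or computed in-loop with a
; reduce intrinsic per iteration (INLOOP).
;
; Roughly equivalent C source (a sketch for orientation only, not part of
; the test input):
;   int rdx = start;
;   for (int64_t i = 0; i < n; ++i) {
;     rdx += a[i];
;     *addr = rdx;
;   }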
| define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr) { |
| ; IF-EVL-OUTLOOP-LABEL: define void @reduction_intermediate_store( |
| ; IF-EVL-OUTLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]], ptr [[ADDR:%.*]]) #[[ATTR0:[0-9]+]] { |
| ; IF-EVL-OUTLOOP-NEXT: entry: |
| ; IF-EVL-OUTLOOP-NEXT: br label [[VECTOR_MEMCHECK:%.*]] |
| ; IF-EVL-OUTLOOP: vector.memcheck: |
| ; IF-EVL-OUTLOOP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[ADDR]], i64 4 |
| ; IF-EVL-OUTLOOP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 2 |
| ; IF-EVL-OUTLOOP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP4]] |
| ; IF-EVL-OUTLOOP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[ADDR]], [[SCEVGEP1]] |
| ; IF-EVL-OUTLOOP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]] |
| ; IF-EVL-OUTLOOP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] |
| ; IF-EVL-OUTLOOP-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] |
| ; IF-EVL-OUTLOOP: vector.ph: |
| ; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0 |
| ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] |
| ; IF-EVL-OUTLOOP: vector.body: |
| ; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ] |
| ; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP10]], [[ENTRY]] ], [ [[TMP19:%.*]], [[FOR_BODY]] ] |
| ; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = phi i64 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ] |
| ; IF-EVL-OUTLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true) |
| ; IF-EVL-OUTLOOP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]] |
| ; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META0:![0-9]+]] |
| ; IF-EVL-OUTLOOP-NEXT: [[VP_OP:%.*]] = add <vscale x 4 x i32> [[VP_OP_LOAD]], [[VEC_PHI]] |
| ; IF-EVL-OUTLOOP-NEXT: [[TMP19]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP12]]) |
| ; IF-EVL-OUTLOOP-NEXT: [[TMP21:%.*]] = zext i32 [[TMP12]] to i64 |
| ; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]] |
| ; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP21]] |
| ; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 |
| ; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; IF-EVL-OUTLOOP: middle.block: |
| ; IF-EVL-OUTLOOP-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]]) |
| ; IF-EVL-OUTLOOP-NEXT: store i32 [[TMP23]], ptr [[ADDR]], align 4, !alias.scope [[META6:![0-9]+]], !noalias [[META0]] |
| ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]] |
| ; IF-EVL-OUTLOOP: scalar.ph: |
| ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY1:%.*]] |
| ; IF-EVL-OUTLOOP: for.body: |
| ; IF-EVL-OUTLOOP-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] |
| ; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY1]] ] |
| ; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] |
| ; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 |
| ; IF-EVL-OUTLOOP-NEXT: [[ADD]] = add nsw i32 [[TMP27]], [[RDX]] |
| ; IF-EVL-OUTLOOP-NEXT: store i32 [[ADD]], ptr [[ADDR]], align 4 |
| ; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 |
| ; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] |
| ; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP8:![0-9]+]] |
| ; IF-EVL-OUTLOOP: for.end: |
| ; IF-EVL-OUTLOOP-NEXT: ret void |
| ; |
| ; IF-EVL-INLOOP-LABEL: define void @reduction_intermediate_store( |
| ; IF-EVL-INLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]], ptr [[ADDR:%.*]]) #[[ATTR0:[0-9]+]] { |
| ; IF-EVL-INLOOP-NEXT: entry: |
| ; IF-EVL-INLOOP-NEXT: br label [[VECTOR_MEMCHECK:%.*]] |
| ; IF-EVL-INLOOP: vector.memcheck: |
| ; IF-EVL-INLOOP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[ADDR]], i64 4 |
| ; IF-EVL-INLOOP-NEXT: [[TMP5:%.*]] = shl i64 [[N]], 2 |
| ; IF-EVL-INLOOP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP5]] |
| ; IF-EVL-INLOOP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[ADDR]], [[SCEVGEP1]] |
| ; IF-EVL-INLOOP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]] |
| ; IF-EVL-INLOOP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] |
| ; IF-EVL-INLOOP-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; IF-EVL-INLOOP: vector.ph: |
| ; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; IF-EVL-INLOOP: vector.body: |
| ; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ] |
| ; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; IF-EVL-INLOOP-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP13]], i32 4, i1 true) |
| ; IF-EVL-INLOOP-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]] |
| ; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]]), !alias.scope [[META0:![0-9]+]] |
| ; IF-EVL-INLOOP-NEXT: [[TMP21:%.*]] = call i32 @llvm.vp.reduce.add.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]]) |
| ; IF-EVL-INLOOP-NEXT: [[TMP22]] = add i32 [[TMP21]], [[VEC_PHI]] |
| ; IF-EVL-INLOOP-NEXT: [[TMP23:%.*]] = zext i32 [[TMP14]] to i64 |
| ; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]] |
| ; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP13]], [[TMP23]] |
| ; IF-EVL-INLOOP-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 |
| ; IF-EVL-INLOOP-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; IF-EVL-INLOOP: middle.block: |
| ; IF-EVL-INLOOP-NEXT: store i32 [[TMP22]], ptr [[ADDR]], align 4, !alias.scope [[META6:![0-9]+]], !noalias [[META0]] |
| ; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]] |
| ; IF-EVL-INLOOP: scalar.ph: |
| ; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] |
| ; IF-EVL-INLOOP: for.body: |
| ; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] |
| ; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] |
| ; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] |
| ; IF-EVL-INLOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 |
| ; IF-EVL-INLOOP-NEXT: [[ADD]] = add nsw i32 [[TMP25]], [[RDX]] |
| ; IF-EVL-INLOOP-NEXT: store i32 [[ADD]], ptr [[ADDR]], align 4 |
| ; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 |
| ; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] |
| ; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] |
| ; IF-EVL-INLOOP: for.end: |
| ; IF-EVL-INLOOP-NEXT: ret void |
| ; |
| ; NO-VP-OUTLOOP-LABEL: define void @reduction_intermediate_store( |
| ; NO-VP-OUTLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]], ptr [[ADDR:%.*]]) #[[ATTR0:[0-9]+]] { |
| ; NO-VP-OUTLOOP-NEXT: entry: |
| ; NO-VP-OUTLOOP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-OUTLOOP-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2 |
| ; NO-VP-OUTLOOP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] |
| ; NO-VP-OUTLOOP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] |
| ; NO-VP-OUTLOOP: vector.memcheck: |
| ; NO-VP-OUTLOOP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[ADDR]], i64 4 |
| ; NO-VP-OUTLOOP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 2 |
| ; NO-VP-OUTLOOP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]] |
| ; NO-VP-OUTLOOP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[ADDR]], [[SCEVGEP1]] |
| ; NO-VP-OUTLOOP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]] |
| ; NO-VP-OUTLOOP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] |
| ; NO-VP-OUTLOOP-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] |
| ; NO-VP-OUTLOOP: vector.ph: |
| ; NO-VP-OUTLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-OUTLOOP-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 2 |
| ; NO-VP-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP5]] |
| ; NO-VP-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] |
| ; NO-VP-OUTLOOP-NEXT: [[TMP8:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0 |
| ; NO-VP-OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; NO-VP-OUTLOOP: vector.body: |
| ; NO-VP-OUTLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; NO-VP-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP8]], [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] |
| ; NO-VP-OUTLOOP-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]] |
| ; NO-VP-OUTLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP10]], align 4, !alias.scope [[META0:![0-9]+]] |
| ; NO-VP-OUTLOOP-NEXT: [[TMP12]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]] |
| ; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] |
| ; NO-VP-OUTLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] |
| ; NO-VP-OUTLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; NO-VP-OUTLOOP: middle.block: |
| ; NO-VP-OUTLOOP-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP12]]) |
| ; NO-VP-OUTLOOP-NEXT: store i32 [[TMP14]], ptr [[ADDR]], align 4, !alias.scope [[META6:![0-9]+]], !noalias [[META0]] |
| ; NO-VP-OUTLOOP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] |
| ; NO-VP-OUTLOOP-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] |
| ; NO-VP-OUTLOOP: scalar.ph: |
| ; NO-VP-OUTLOOP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ] |
| ; NO-VP-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP14]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY]] ], [ [[START]], [[VECTOR_MEMCHECK]] ] |
| ; NO-VP-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] |
| ; NO-VP-OUTLOOP: for.body: |
| ; NO-VP-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] |
| ; NO-VP-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] |
| ; NO-VP-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] |
| ; NO-VP-OUTLOOP-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 |
| ; NO-VP-OUTLOOP-NEXT: [[ADD]] = add nsw i32 [[TMP15]], [[RDX]] |
| ; NO-VP-OUTLOOP-NEXT: store i32 [[ADD]], ptr [[ADDR]], align 4 |
| ; NO-VP-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 |
| ; NO-VP-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] |
| ; NO-VP-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] |
| ; NO-VP-OUTLOOP: for.end: |
| ; NO-VP-OUTLOOP-NEXT: ret void |
| ; |
| ; NO-VP-INLOOP-LABEL: define void @reduction_intermediate_store( |
| ; NO-VP-INLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]], ptr [[ADDR:%.*]]) #[[ATTR0:[0-9]+]] { |
| ; NO-VP-INLOOP-NEXT: entry: |
| ; NO-VP-INLOOP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-INLOOP-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2 |
| ; NO-VP-INLOOP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] |
| ; NO-VP-INLOOP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] |
| ; NO-VP-INLOOP: vector.memcheck: |
| ; NO-VP-INLOOP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[ADDR]], i64 4 |
| ; NO-VP-INLOOP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 2 |
| ; NO-VP-INLOOP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]] |
| ; NO-VP-INLOOP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[ADDR]], [[SCEVGEP1]] |
| ; NO-VP-INLOOP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]] |
| ; NO-VP-INLOOP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] |
| ; NO-VP-INLOOP-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] |
| ; NO-VP-INLOOP: vector.ph: |
| ; NO-VP-INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-INLOOP-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 2 |
| ; NO-VP-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP5]] |
| ; NO-VP-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] |
| ; NO-VP-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; NO-VP-INLOOP: vector.body: |
| ; NO-VP-INLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; NO-VP-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] |
| ; NO-VP-INLOOP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]] |
| ; NO-VP-INLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4, !alias.scope [[META0:![0-9]+]] |
| ; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) |
| ; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]] |
| ; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] |
| ; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] |
| ; NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; NO-VP-INLOOP: middle.block: |
| ; NO-VP-INLOOP-NEXT: store i32 [[TMP12]], ptr [[ADDR]], align 4, !alias.scope [[META6:![0-9]+]], !noalias [[META0]] |
| ; NO-VP-INLOOP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] |
| ; NO-VP-INLOOP-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] |
| ; NO-VP-INLOOP: scalar.ph: |
| ; NO-VP-INLOOP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ] |
| ; NO-VP-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP12]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY]] ], [ [[START]], [[VECTOR_MEMCHECK]] ] |
| ; NO-VP-INLOOP-NEXT: br label [[FOR_BODY:%.*]] |
| ; NO-VP-INLOOP: for.body: |
| ; NO-VP-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] |
| ; NO-VP-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] |
| ; NO-VP-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] |
| ; NO-VP-INLOOP-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 |
| ; NO-VP-INLOOP-NEXT: [[ADD]] = add nsw i32 [[TMP14]], [[RDX]] |
| ; NO-VP-INLOOP-NEXT: store i32 [[ADD]], ptr [[ADDR]], align 4 |
| ; NO-VP-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 |
| ; NO-VP-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] |
| ; NO-VP-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] |
| ; NO-VP-INLOOP: for.end: |
| ; NO-VP-INLOOP-NEXT: ret void |
| ; |
| entry: |
| br label %for.body |
| |
| for.body: |
| %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] |
| %rdx = phi i32 [ %start, %entry ], [ %add, %for.body ] |
| %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv |
| %0 = load i32, ptr %arrayidx, align 4 |
| %add = add nsw i32 %0, %rdx |
| store i32 %add, ptr %addr, align 4 |
| %iv.next = add nuw nsw i64 %iv, 1 |
| %exitcond.not = icmp eq i64 %iv.next, %n |
| br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0 |
| |
| for.end: |
| ret void |
| } |
| |
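; !0/!1 attach llvm.loop.vectorize.enable to the scalar loop above,
; explicitly requesting vectorization so the test does not depend on the
; cost model's profitability decision.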
| !0 = distinct !{!0, !1} |
| !1 = !{!"llvm.loop.vectorize.enable", i1 true} |
| ;. |
| ; IF-EVL-OUTLOOP: [[META0]] = !{[[META1:![0-9]+]]} |
| ; IF-EVL-OUTLOOP: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]} |
| ; IF-EVL-OUTLOOP: [[META2]] = distinct !{[[META2]], !"LVerDomain"} |
| ; IF-EVL-OUTLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]], [[META5:![0-9]+]]} |
| ; IF-EVL-OUTLOOP: [[META4]] = !{!"llvm.loop.isvectorized", i32 1} |
| ; IF-EVL-OUTLOOP: [[META5]] = !{!"llvm.loop.unroll.runtime.disable"} |
| ; IF-EVL-OUTLOOP: [[META6]] = !{[[META7:![0-9]+]]} |
| ; IF-EVL-OUTLOOP: [[META7]] = distinct !{[[META7]], [[META2]]} |
| ; IF-EVL-OUTLOOP: [[LOOP8]] = distinct !{[[LOOP8]], [[META4]]} |
| ;. |
| ; IF-EVL-INLOOP: [[META0]] = !{[[META1:![0-9]+]]} |
| ; IF-EVL-INLOOP: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]} |
| ; IF-EVL-INLOOP: [[META2]] = distinct !{[[META2]], !"LVerDomain"} |
| ; IF-EVL-INLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]], [[META5:![0-9]+]]} |
| ; IF-EVL-INLOOP: [[META4]] = !{!"llvm.loop.isvectorized", i32 1} |
| ; IF-EVL-INLOOP: [[META5]] = !{!"llvm.loop.unroll.runtime.disable"} |
| ; IF-EVL-INLOOP: [[META6]] = !{[[META7:![0-9]+]]} |
| ; IF-EVL-INLOOP: [[META7]] = distinct !{[[META7]], [[META2]]} |
| ; IF-EVL-INLOOP: [[LOOP8]] = distinct !{[[LOOP8]], [[META4]]} |
| ;. |
| ; NO-VP-OUTLOOP: [[META0]] = !{[[META1:![0-9]+]]} |
| ; NO-VP-OUTLOOP: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]} |
| ; NO-VP-OUTLOOP: [[META2]] = distinct !{[[META2]], !"LVerDomain"} |
| ; NO-VP-OUTLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]], [[META5:![0-9]+]]} |
| ; NO-VP-OUTLOOP: [[META4]] = !{!"llvm.loop.isvectorized", i32 1} |
| ; NO-VP-OUTLOOP: [[META5]] = !{!"llvm.loop.unroll.runtime.disable"} |
| ; NO-VP-OUTLOOP: [[META6]] = !{[[META7:![0-9]+]]} |
| ; NO-VP-OUTLOOP: [[META7]] = distinct !{[[META7]], [[META2]]} |
| ; NO-VP-OUTLOOP: [[LOOP8]] = distinct !{[[LOOP8]], [[META4]]} |
| ;. |
| ; NO-VP-INLOOP: [[META0]] = !{[[META1:![0-9]+]]} |
| ; NO-VP-INLOOP: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]} |
| ; NO-VP-INLOOP: [[META2]] = distinct !{[[META2]], !"LVerDomain"} |
| ; NO-VP-INLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]], [[META5:![0-9]+]]} |
| ; NO-VP-INLOOP: [[META4]] = !{!"llvm.loop.isvectorized", i32 1} |
| ; NO-VP-INLOOP: [[META5]] = !{!"llvm.loop.unroll.runtime.disable"} |
| ; NO-VP-INLOOP: [[META6]] = !{[[META7:![0-9]+]]} |
| ; NO-VP-INLOOP: [[META7]] = distinct !{[[META7]], [[META2]]} |
| ; NO-VP-INLOOP: [[LOOP8]] = distinct !{[[LOOP8]], [[META4]]} |
| ;. |