| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py |
| ; RUN: opt -passes=loop-vectorize \ |
| ; RUN: -force-tail-folding-style=data-with-evl \ |
| ; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ |
| ; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=IF-EVL %s |
| |
| ; RUN: opt -passes=loop-vectorize \ |
| ; RUN: -force-tail-folding-style=none \ |
| ; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -force-vector-width=4 \ |
| ; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=NO-VP %s |
| |
| ; RUN: opt -passes=loop-vectorize \ |
| ; RUN: -force-tail-folding-style=none \ |
| ; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ |
| ; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=NO-VP-DEF %s |
| |
| ; The target does not support predicated vectorization. |
| define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { |
| ; IF-EVL-LABEL: @foo( |
| ; IF-EVL-NEXT: entry: |
| ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] |
| ; IF-EVL: for.body: |
| ; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] |
| ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[IV]] |
| ; IF-EVL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 |
| ; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[IV]] |
| ; IF-EVL-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 |
| ; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP0]] |
| ; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IV]] |
| ; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 |
| ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 |
| ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]] |
| ; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] |
| ; IF-EVL: for.cond.cleanup: |
| ; IF-EVL-NEXT: ret void |
| ; |
| ; NO-VP-LABEL: @foo( |
| ; NO-VP-NEXT: entry: |
| ; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 [[TMP0]], 4 |
| ; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP8]] |
| ; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; NO-VP: vector.ph: |
| ; NO-VP-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-NEXT: [[TMP14:%.*]] = mul i64 [[TMP1]], 4 |
| ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP14]] |
| ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] |
| ; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-NEXT: [[TMP15:%.*]] = mul i64 [[TMP2]], 4 |
| ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; NO-VP: vector.body: |
| ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; NO-VP-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 |
| ; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP3]] |
| ; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0 |
| ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4 |
| ; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP3]] |
| ; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 0 |
| ; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4 |
| ; NO-VP-NEXT: [[TMP16:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]] |
| ; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP3]] |
| ; NO-VP-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i32 0 |
| ; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP16]], ptr [[TMP10]], align 4 |
| ; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP15]] |
| ; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] |
| ; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] |
| ; NO-VP: middle.block: |
| ; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] |
| ; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] |
| ; NO-VP: scalar.ph: |
| ; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] |
| ; NO-VP-NEXT: br label [[FOR_BODY:%.*]] |
| ; NO-VP: for.body: |
| ; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] |
| ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] |
| ; NO-VP-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 |
| ; NO-VP-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] |
| ; NO-VP-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 |
| ; NO-VP-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP12]] |
| ; NO-VP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] |
| ; NO-VP-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 |
| ; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 |
| ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] |
| ; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; NO-VP: for.cond.cleanup: |
| ; NO-VP-NEXT: ret void |
| ; |
| ; NO-VP-DEF-LABEL: @foo( |
| ; NO-VP-DEF-NEXT: entry: |
| ; NO-VP-DEF-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-DEF-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP0]] |
| ; NO-VP-DEF-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; NO-VP-DEF: vector.ph: |
| ; NO-VP-DEF-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-DEF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP1]] |
| ; NO-VP-DEF-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] |
| ; NO-VP-DEF-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-DEF-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; NO-VP-DEF: vector.body: |
| ; NO-VP-DEF-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; NO-VP-DEF-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 |
| ; NO-VP-DEF-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP3]] |
| ; NO-VP-DEF-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0 |
| ; NO-VP-DEF-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP5]], align 4 |
| ; NO-VP-DEF-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP3]] |
| ; NO-VP-DEF-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 0 |
| ; NO-VP-DEF-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 1 x i32>, ptr [[TMP7]], align 4 |
| ; NO-VP-DEF-NEXT: [[TMP8:%.*]] = add nsw <vscale x 1 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]] |
| ; NO-VP-DEF-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP3]] |
| ; NO-VP-DEF-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i32 0 |
| ; NO-VP-DEF-NEXT: store <vscale x 1 x i32> [[TMP8]], ptr [[TMP10]], align 4 |
| ; NO-VP-DEF-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] |
| ; NO-VP-DEF-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] |
| ; NO-VP-DEF-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] |
| ; NO-VP-DEF: middle.block: |
| ; NO-VP-DEF-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] |
| ; NO-VP-DEF-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] |
| ; NO-VP-DEF: scalar.ph: |
| ; NO-VP-DEF-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] |
| ; NO-VP-DEF-NEXT: br label [[FOR_BODY:%.*]] |
| ; NO-VP-DEF: for.body: |
| ; NO-VP-DEF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] |
| ; NO-VP-DEF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] |
| ; NO-VP-DEF-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 |
| ; NO-VP-DEF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] |
| ; NO-VP-DEF-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 |
| ; NO-VP-DEF-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP12]] |
| ; NO-VP-DEF-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] |
| ; NO-VP-DEF-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 |
| ; NO-VP-DEF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 |
| ; NO-VP-DEF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] |
| ; NO-VP-DEF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; NO-VP-DEF: for.cond.cleanup: |
| ; NO-VP-DEF-NEXT: ret void |
| ; |
| entry: |
| br label %for.body |
| |
| for.body: |
| %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] |
| %arrayidx = getelementptr inbounds i32, ptr %b, i64 %iv |
| %0 = load i32, ptr %arrayidx, align 4 |
| %arrayidx2 = getelementptr inbounds i32, ptr %c, i64 %iv |
| %1 = load i32, ptr %arrayidx2, align 4 |
| %add = add nsw i32 %1, %0 |
| %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %iv |
| store i32 %add, ptr %arrayidx4, align 4 |
| %iv.next = add nuw nsw i64 %iv, 1 |
| %exitcond.not = icmp eq i64 %iv.next, %N |
| br i1 %exitcond.not, label %for.cond.cleanup, label %for.body |
| |
| for.cond.cleanup: |
| ret void |
| } |