| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py |
| ; RUN: opt -S -vectorize-num-stores-pred=1 -force-vector-width=1 -force-vector-interleave=2 -passes=loop-vectorize,simplifycfg -verify-loop-info -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck %s --check-prefix=UNROLL |
| ; RUN: opt -S -vectorize-num-stores-pred=1 -force-vector-width=1 -force-vector-interleave=2 -passes=loop-vectorize -verify-loop-info < %s | FileCheck %s --check-prefix=UNROLL-NOSIMPLIFY |
| ; RUN: opt -S -vectorize-num-stores-pred=1 -force-vector-width=2 -force-vector-interleave=1 -passes=loop-vectorize,simplifycfg -verify-loop-info -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck %s --check-prefix=VEC |
| |
| target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" |
| |
| ; Test predication of stores. |
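| ; The loop below is roughly equivalent to the following C source (a |
| ; reconstruction provided for reference only; it is not part of the original |
| ; test input): |
| ; |
| ;   int test(int *f) { |
| ;     for (long i = 0; i < 128; i++) |
| ;       if (f[i] > 100) |
| ;         f[i] += 20; |
| ;     return 0; |
| ;   } |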
| define i32 @test(ptr nocapture %f) #0 { |
| ; UNROLL-LABEL: @test( |
| ; UNROLL-NEXT: entry: |
| ; UNROLL-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; UNROLL: vector.body: |
| ; UNROLL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ] |
| ; UNROLL-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 |
| ; UNROLL-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 |
| ; UNROLL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[F:%.*]], i64 [[TMP0]] |
| ; UNROLL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[TMP1]] |
| ; UNROLL-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4 |
| ; UNROLL-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4 |
| ; UNROLL-NEXT: [[TMP6:%.*]] = icmp sgt i32 [[TMP4]], 100 |
| ; UNROLL-NEXT: [[TMP7:%.*]] = icmp sgt i32 [[TMP5]], 100 |
| ; UNROLL-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] |
| ; UNROLL: pred.store.if: |
| ; UNROLL-NEXT: [[TMP8:%.*]] = add nsw i32 [[TMP4]], 20 |
| ; UNROLL-NEXT: store i32 [[TMP8]], ptr [[TMP2]], align 4 |
| ; UNROLL-NEXT: br label [[PRED_STORE_CONTINUE]] |
| ; UNROLL: pred.store.continue: |
| ; UNROLL-NEXT: br i1 [[TMP7]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]] |
| ; UNROLL: pred.store.if1: |
| ; UNROLL-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP5]], 20 |
| ; UNROLL-NEXT: store i32 [[TMP9]], ptr [[TMP3]], align 4 |
| ; UNROLL-NEXT: br label [[PRED_STORE_CONTINUE2]] |
| ; UNROLL: pred.store.continue2: |
| ; UNROLL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 |
| ; UNROLL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 |
| ; UNROLL-NEXT: br i1 [[TMP10]], label [[FOR_END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] |
| ; UNROLL: for.end: |
| ; UNROLL-NEXT: ret i32 0 |
| ; |
| ; UNROLL-NOSIMPLIFY-LABEL: @test( |
| ; UNROLL-NOSIMPLIFY-NEXT: entry: |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; UNROLL-NOSIMPLIFY: vector.ph: |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; UNROLL-NOSIMPLIFY: vector.body: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[F:%.*]], i64 [[TMP0]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[TMP1]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = icmp sgt i32 [[TMP4]], 100 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP7:%.*]] = icmp sgt i32 [[TMP5]], 100 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] |
| ; UNROLL-NOSIMPLIFY: pred.store.if: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP8:%.*]] = add nsw i32 [[TMP4]], 20 |
| ; UNROLL-NOSIMPLIFY-NEXT: store i32 [[TMP8]], ptr [[TMP2]], align 4 |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[PRED_STORE_CONTINUE]] |
| ; UNROLL-NOSIMPLIFY: pred.store.continue: |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP7]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]] |
| ; UNROLL-NOSIMPLIFY: pred.store.if1: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP5]], 20 |
| ; UNROLL-NOSIMPLIFY-NEXT: store i32 [[TMP9]], ptr [[TMP3]], align 4 |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[PRED_STORE_CONTINUE2]] |
| ; UNROLL-NOSIMPLIFY: pred.store.continue2: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] |
| ; UNROLL-NOSIMPLIFY: middle.block: |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] |
| ; UNROLL-NOSIMPLIFY: scalar.ph: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 128, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_BODY:%.*]] |
| ; UNROLL-NOSIMPLIFY: for.body: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[INDVARS_IV]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP11]], 100 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]] |
| ; UNROLL-NOSIMPLIFY: if.then: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], 20 |
| ; UNROLL-NOSIMPLIFY-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC]] |
| ; UNROLL-NOSIMPLIFY: for.inc: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 128 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; UNROLL-NOSIMPLIFY: for.end: |
| ; UNROLL-NOSIMPLIFY-NEXT: ret i32 0 |
| ; |
| ; VEC-LABEL: @test( |
| ; VEC-NEXT: entry: |
| ; VEC-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; VEC: vector.body: |
| ; VEC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ] |
| ; VEC-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 |
| ; VEC-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[F:%.*]], i64 [[TMP0]] |
| ; VEC-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0 |
| ; VEC-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP2]], align 4 |
| ; VEC-NEXT: [[TMP3:%.*]] = icmp sgt <2 x i32> [[WIDE_LOAD]], splat (i32 100) |
| ; VEC-NEXT: [[TMP4:%.*]] = extractelement <2 x i1> [[TMP3]], i32 0 |
| ; VEC-NEXT: br i1 [[TMP4]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] |
| ; VEC: pred.store.if: |
| ; VEC-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[TMP0]] |
| ; VEC-NEXT: [[TMP6:%.*]] = extractelement <2 x i32> [[WIDE_LOAD]], i32 0 |
| ; VEC-NEXT: [[TMP7:%.*]] = add nsw i32 [[TMP6]], 20 |
| ; VEC-NEXT: store i32 [[TMP7]], ptr [[TMP5]], align 4 |
| ; VEC-NEXT: br label [[PRED_STORE_CONTINUE]] |
| ; VEC: pred.store.continue: |
| ; VEC-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP3]], i32 1 |
| ; VEC-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]] |
| ; VEC: pred.store.if1: |
| ; VEC-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 1 |
| ; VEC-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[TMP9]] |
| ; VEC-NEXT: [[TMP11:%.*]] = extractelement <2 x i32> [[WIDE_LOAD]], i32 1 |
| ; VEC-NEXT: [[TMP12:%.*]] = add nsw i32 [[TMP11]], 20 |
| ; VEC-NEXT: store i32 [[TMP12]], ptr [[TMP10]], align 4 |
| ; VEC-NEXT: br label [[PRED_STORE_CONTINUE2]] |
| ; VEC: pred.store.continue2: |
| ; VEC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 |
| ; VEC-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 |
| ; VEC-NEXT: br i1 [[TMP13]], label [[FOR_END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] |
| ; VEC: for.end: |
| ; VEC-NEXT: ret i32 0 |
| ; |
| entry: |
| br label %for.body |
| |
| for.body: |
| %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ] |
| %arrayidx = getelementptr inbounds i32, ptr %f, i64 %indvars.iv |
| %0 = load i32, ptr %arrayidx, align 4 |
| %cmp1 = icmp sgt i32 %0, 100 |
| br i1 %cmp1, label %if.then, label %for.inc |
| |
| if.then: |
| %add = add nsw i32 %0, 20 |
| store i32 %add, ptr %arrayidx, align 4 |
| br label %for.inc |
| |
| for.inc: |
| %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 |
| %exitcond = icmp eq i64 %indvars.iv.next, 128 |
| br i1 %exitcond, label %for.end, label %for.body |
| |
| for.end: |
| ret i32 0 |
| } |
| |
| ; Track basic blocks when unrolling conditional blocks. This code used to hit |
| ; an assertion because we did not update the phi nodes with the proper |
| ; predecessor in the vectorized loop body. |
| ; PR18724 |
| |
| define void @bug18724(i1 %cond, ptr %ptr, i1 %cond.2, i64 %v.1, i32 %v.2) { |
| ; UNROLL-LABEL: @bug18724( |
| ; UNROLL-NEXT: entry: |
| ; UNROLL-NEXT: [[TMP0:%.*]] = xor i1 [[COND:%.*]], true |
| ; UNROLL-NEXT: call void @llvm.assume(i1 [[TMP0]]) |
| ; UNROLL-NEXT: [[TMP1:%.*]] = trunc i64 [[V_1:%.*]] to i32 |
| ; UNROLL-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP1]], i32 0) |
| ; UNROLL-NEXT: [[TMP2:%.*]] = sub i32 [[SMAX]], [[TMP1]] |
| ; UNROLL-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 |
| ; UNROLL-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1 |
| ; UNROLL-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 2 |
| ; UNROLL-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; UNROLL: vector.ph: |
| ; UNROLL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 2 |
| ; UNROLL-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]] |
| ; UNROLL-NEXT: [[IND_END:%.*]] = add i64 [[V_1]], [[N_VEC]] |
| ; UNROLL-NEXT: [[TMP13:%.*]] = xor i1 [[COND_2:%.*]], true |
| ; UNROLL-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; UNROLL: vector.body: |
| ; UNROLL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ] |
| ; UNROLL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[V_2:%.*]], [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE3]] ] |
| ; UNROLL-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[PREDPHI4:%.*]], [[PRED_STORE_CONTINUE3]] ] |
| ; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[V_1]], [[INDEX]] |
| ; UNROLL-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 0 |
| ; UNROLL-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 1 |
| ; UNROLL-NEXT: [[TMP7:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR:%.*]], i64 0, i64 [[TMP5]] |
| ; UNROLL-NEXT: [[TMP8:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[TMP6]] |
| ; UNROLL-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP7]], align 4 |
| ; UNROLL-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP8]], align 4 |
| ; UNROLL-NEXT: br i1 [[COND_2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE3]] |
| ; UNROLL: pred.store.if: |
| ; UNROLL-NEXT: store i32 [[TMP9]], ptr [[TMP7]], align 4 |
| ; UNROLL-NEXT: store i32 [[TMP10]], ptr [[TMP8]], align 4 |
| ; UNROLL-NEXT: br label [[PRED_STORE_CONTINUE3]] |
| ; UNROLL: pred.store.continue3: |
| ; UNROLL-NEXT: [[TMP11:%.*]] = add i32 [[VEC_PHI]], 1 |
| ; UNROLL-NEXT: [[TMP12:%.*]] = add i32 [[VEC_PHI1]], 1 |
| ; UNROLL-NEXT: [[PREDPHI]] = select i1 [[TMP13]], i32 [[VEC_PHI]], i32 [[TMP11]] |
| ; UNROLL-NEXT: [[PREDPHI4]] = select i1 [[TMP13]], i32 [[VEC_PHI1]], i32 [[TMP12]] |
| ; UNROLL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 |
| ; UNROLL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] |
| ; UNROLL-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; UNROLL: middle.block: |
| ; UNROLL-NEXT: [[BIN_RDX:%.*]] = add i32 [[PREDPHI4]], [[PREDPHI]] |
| ; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]] |
| ; UNROLL-NEXT: [[TMP16:%.*]] = xor i1 [[CMP_N]], true |
| ; UNROLL-NEXT: call void @llvm.assume(i1 [[TMP16]]) |
| ; UNROLL-NEXT: br label [[SCALAR_PH]] |
| ; UNROLL: scalar.ph: |
| ; UNROLL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[V_1]], [[ENTRY:%.*]] ] |
| ; UNROLL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ], [ [[V_2]], [[ENTRY]] ] |
| ; UNROLL-NEXT: br label [[FOR_BODY14:%.*]] |
| ; UNROLL: for.body14: |
| ; UNROLL-NEXT: [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT4:%.*]], [[FOR_INC23:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] |
| ; UNROLL-NEXT: [[INEWCHUNKS_120:%.*]] = phi i32 [ [[INEWCHUNKS_2:%.*]], [[FOR_INC23]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] |
| ; UNROLL-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[INDVARS_IV3]] |
| ; UNROLL-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX16]], align 4 |
| ; UNROLL-NEXT: br i1 [[COND_2]], label [[IF_THEN18:%.*]], label [[FOR_INC23]] |
| ; UNROLL: if.then18: |
| ; UNROLL-NEXT: store i32 [[TMP]], ptr [[ARRAYIDX16]], align 4 |
| ; UNROLL-NEXT: [[INC21:%.*]] = add nsw i32 [[INEWCHUNKS_120]], 1 |
| ; UNROLL-NEXT: br label [[FOR_INC23]] |
| ; UNROLL: for.inc23: |
| ; UNROLL-NEXT: [[INEWCHUNKS_2]] = phi i32 [ [[INC21]], [[IF_THEN18]] ], [ [[INEWCHUNKS_120]], [[FOR_BODY14]] ] |
| ; UNROLL-NEXT: [[INDVARS_IV_NEXT4]] = add nsw i64 [[INDVARS_IV3]], 1 |
| ; UNROLL-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV3]] to i32 |
| ; UNROLL-NEXT: [[CMP13:%.*]] = icmp slt i32 [[TMP1]], 0 |
| ; UNROLL-NEXT: call void @llvm.assume(i1 [[CMP13]]) |
| ; UNROLL-NEXT: br label [[FOR_BODY14]] |
| ; |
| ; UNROLL-NOSIMPLIFY-LABEL: @bug18724( |
| ; UNROLL-NOSIMPLIFY-NEXT: entry: |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_BODY9:%.*]] |
| ; UNROLL-NOSIMPLIFY: for.body9: |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[COND:%.*]], label [[FOR_INC26:%.*]], label [[FOR_BODY14_PREHEADER:%.*]] |
| ; UNROLL-NOSIMPLIFY: for.body14.preheader: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP0:%.*]] = trunc i64 [[V_1:%.*]] to i32 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP0]], i32 0) |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP1:%.*]] = sub i32 [[SMAX]], [[TMP0]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 2 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; UNROLL-NOSIMPLIFY: vector.ph: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 2 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[IND_END:%.*]] = add i64 [[V_1]], [[N_VEC]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP12:%.*]] = xor i1 [[COND_2:%.*]], true |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; UNROLL-NOSIMPLIFY: vector.body: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[V_2:%.*]], [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE3]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[PREDPHI4:%.*]], [[PRED_STORE_CONTINUE3]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[V_1]], [[INDEX]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 0 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR:%.*]], i64 0, i64 [[TMP4]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP7:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[TMP5]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP6]], align 4 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP7]], align 4 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[COND_2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] |
| ; UNROLL-NOSIMPLIFY: pred.store.if: |
| ; UNROLL-NOSIMPLIFY-NEXT: store i32 [[TMP8]], ptr [[TMP6]], align 4 |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[PRED_STORE_CONTINUE]] |
| ; UNROLL-NOSIMPLIFY: pred.store.continue: |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[COND_2]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3]] |
| ; UNROLL-NOSIMPLIFY: pred.store.if2: |
| ; UNROLL-NOSIMPLIFY-NEXT: store i32 [[TMP9]], ptr [[TMP7]], align 4 |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[PRED_STORE_CONTINUE3]] |
| ; UNROLL-NOSIMPLIFY: pred.store.continue3: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP10:%.*]] = add i32 [[VEC_PHI]], 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP11:%.*]] = add i32 [[VEC_PHI1]], 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[PREDPHI]] = select i1 [[TMP12]], i32 [[VEC_PHI]], i32 [[TMP10]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[PREDPHI4]] = select i1 [[TMP12]], i32 [[VEC_PHI1]], i32 [[TMP11]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] |
| ; UNROLL-NOSIMPLIFY: middle.block: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[BIN_RDX:%.*]] = add i32 [[PREDPHI4]], [[PREDPHI]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]] |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[CMP_N]], label [[FOR_INC26_LOOPEXIT:%.*]], label [[SCALAR_PH]] |
| ; UNROLL-NOSIMPLIFY: scalar.ph: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[V_1]], [[FOR_BODY14_PREHEADER]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ], [ [[V_2]], [[FOR_BODY14_PREHEADER]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_BODY14:%.*]] |
| ; UNROLL-NOSIMPLIFY: for.body14: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT4:%.*]], [[FOR_INC23:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INEWCHUNKS_120:%.*]] = phi i32 [ [[INEWCHUNKS_2:%.*]], [[FOR_INC23]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[INDVARS_IV3]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX16]], align 4 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[COND_2]], label [[IF_THEN18:%.*]], label [[FOR_INC23]] |
| ; UNROLL-NOSIMPLIFY: if.then18: |
| ; UNROLL-NOSIMPLIFY-NEXT: store i32 [[TMP]], ptr [[ARRAYIDX16]], align 4 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INC21:%.*]] = add nsw i32 [[INEWCHUNKS_120]], 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC23]] |
| ; UNROLL-NOSIMPLIFY: for.inc23: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INEWCHUNKS_2]] = phi i32 [ [[INC21]], [[IF_THEN18]] ], [ [[INEWCHUNKS_120]], [[FOR_BODY14]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INDVARS_IV_NEXT4]] = add nsw i64 [[INDVARS_IV3]], 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV3]] to i32 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[CMP13:%.*]] = icmp slt i32 [[TMP1]], 0 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[CMP13]], label [[FOR_BODY14]], label [[FOR_INC26_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]] |
| ; UNROLL-NOSIMPLIFY: for.inc26.loopexit: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INEWCHUNKS_2_LCSSA:%.*]] = phi i32 [ [[INEWCHUNKS_2]], [[FOR_INC23]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC26]] |
| ; UNROLL-NOSIMPLIFY: for.inc26: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INEWCHUNKS_1_LCSSA:%.*]] = phi i32 [ undef, [[FOR_BODY9]] ], [ [[INEWCHUNKS_2_LCSSA]], [[FOR_INC26_LOOPEXIT]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: unreachable |
| ; |
| ; VEC-LABEL: @bug18724( |
| ; VEC-NEXT: entry: |
| ; VEC-NEXT: [[TMP0:%.*]] = xor i1 [[COND:%.*]], true |
| ; VEC-NEXT: call void @llvm.assume(i1 [[TMP0]]) |
| ; VEC-NEXT: [[TMP1:%.*]] = trunc i64 [[V_1:%.*]] to i32 |
| ; VEC-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP1]], i32 0) |
| ; VEC-NEXT: [[TMP2:%.*]] = sub i32 [[SMAX]], [[TMP1]] |
| ; VEC-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 |
| ; VEC-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1 |
| ; VEC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 2 |
| ; VEC-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; VEC: vector.ph: |
| ; VEC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 2 |
| ; VEC-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]] |
| ; VEC-NEXT: [[IND_END:%.*]] = add i64 [[V_1]], [[N_VEC]] |
| ; VEC-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[COND_2:%.*]], i64 0 |
| ; VEC-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer |
| ; VEC-NEXT: [[TMP17:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true) |
| ; VEC-NEXT: [[TMP5:%.*]] = insertelement <2 x i32> zeroinitializer, i32 [[V_2:%.*]], i32 0 |
| ; VEC-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; VEC: vector.body: |
| ; VEC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ] |
| ; VEC-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE2]] ] |
| ; VEC-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[V_1]], [[INDEX]] |
| ; VEC-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 0 |
| ; VEC-NEXT: [[TMP7:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR:%.*]], i64 0, i64 [[TMP6]] |
| ; VEC-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 0 |
| ; VEC-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP8]], align 4 |
| ; VEC-NEXT: [[TMP9:%.*]] = extractelement <2 x i1> [[BROADCAST_SPLAT]], i32 0 |
| ; VEC-NEXT: br i1 [[TMP9]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] |
| ; VEC: pred.store.if: |
| ; VEC-NEXT: [[TMP10:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[TMP6]] |
| ; VEC-NEXT: [[TMP11:%.*]] = extractelement <2 x i32> [[WIDE_LOAD]], i32 0 |
| ; VEC-NEXT: store i32 [[TMP11]], ptr [[TMP10]], align 4 |
| ; VEC-NEXT: br label [[PRED_STORE_CONTINUE]] |
| ; VEC: pred.store.continue: |
| ; VEC-NEXT: [[TMP12:%.*]] = extractelement <2 x i1> [[BROADCAST_SPLAT]], i32 1 |
| ; VEC-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]] |
| ; VEC: pred.store.if1: |
| ; VEC-NEXT: [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 1 |
| ; VEC-NEXT: [[TMP14:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[TMP13]] |
| ; VEC-NEXT: [[TMP15:%.*]] = extractelement <2 x i32> [[WIDE_LOAD]], i32 1 |
| ; VEC-NEXT: store i32 [[TMP15]], ptr [[TMP14]], align 4 |
| ; VEC-NEXT: br label [[PRED_STORE_CONTINUE2]] |
| ; VEC: pred.store.continue2: |
| ; VEC-NEXT: [[TMP16:%.*]] = add <2 x i32> [[VEC_PHI]], splat (i32 1) |
| ; VEC-NEXT: [[PREDPHI]] = select <2 x i1> [[TMP17]], <2 x i32> [[VEC_PHI]], <2 x i32> [[TMP16]] |
| ; VEC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 |
| ; VEC-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] |
| ; VEC-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; VEC: middle.block: |
| ; VEC-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[PREDPHI]]) |
| ; VEC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]] |
| ; VEC-NEXT: [[TMP20:%.*]] = xor i1 [[CMP_N]], true |
| ; VEC-NEXT: call void @llvm.assume(i1 [[TMP20]]) |
| ; VEC-NEXT: br label [[SCALAR_PH]] |
| ; VEC: scalar.ph: |
| ; VEC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[V_1]], [[ENTRY:%.*]] ] |
| ; VEC-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP19]], [[MIDDLE_BLOCK]] ], [ [[V_2]], [[ENTRY]] ] |
| ; VEC-NEXT: br label [[FOR_BODY14:%.*]] |
| ; VEC: for.body14: |
| ; VEC-NEXT: [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT4:%.*]], [[FOR_INC23:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] |
| ; VEC-NEXT: [[INEWCHUNKS_120:%.*]] = phi i32 [ [[INEWCHUNKS_2:%.*]], [[FOR_INC23]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] |
| ; VEC-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[INDVARS_IV3]] |
| ; VEC-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX16]], align 4 |
| ; VEC-NEXT: br i1 [[COND_2]], label [[IF_THEN18:%.*]], label [[FOR_INC23]] |
| ; VEC: if.then18: |
| ; VEC-NEXT: store i32 [[TMP]], ptr [[ARRAYIDX16]], align 4 |
| ; VEC-NEXT: [[INC21:%.*]] = add nsw i32 [[INEWCHUNKS_120]], 1 |
| ; VEC-NEXT: br label [[FOR_INC23]] |
| ; VEC: for.inc23: |
| ; VEC-NEXT: [[INEWCHUNKS_2]] = phi i32 [ [[INC21]], [[IF_THEN18]] ], [ [[INEWCHUNKS_120]], [[FOR_BODY14]] ] |
| ; VEC-NEXT: [[INDVARS_IV_NEXT4]] = add nsw i64 [[INDVARS_IV3]], 1 |
| ; VEC-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV3]] to i32 |
| ; VEC-NEXT: [[CMP13:%.*]] = icmp slt i32 [[TMP1]], 0 |
| ; VEC-NEXT: call void @llvm.assume(i1 [[CMP13]]) |
| ; VEC-NEXT: br label [[FOR_BODY14]] |
| ; |
| entry: |
| br label %for.body9 |
| |
| for.body9: |
| br i1 %cond, label %for.inc26, label %for.body14 |
| |
| for.body14: |
| %indvars.iv3 = phi i64 [ %indvars.iv.next4, %for.inc23 ], [ %v.1, %for.body9 ] |
| %iNewChunks.120 = phi i32 [ %iNewChunks.2, %for.inc23 ], [ %v.2, %for.body9 ] |
| %arrayidx16 = getelementptr inbounds [768 x i32], ptr %ptr, i64 0, i64 %indvars.iv3 |
| %tmp = load i32, ptr %arrayidx16, align 4 |
| br i1 %cond.2, label %if.then18, label %for.inc23 |
| |
| if.then18: |
| store i32 %tmp, ptr %arrayidx16, align 4 |
| %inc21 = add nsw i32 %iNewChunks.120, 1 |
| br label %for.inc23 |
| |
| for.inc23: |
| %iNewChunks.2 = phi i32 [ %inc21, %if.then18 ], [ %iNewChunks.120, %for.body14 ] |
| %indvars.iv.next4 = add nsw i64 %indvars.iv3, 1 |
| %tmp1 = trunc i64 %indvars.iv3 to i32 |
| %cmp13 = icmp slt i32 %tmp1, 0 |
| br i1 %cmp13, label %for.body14, label %for.inc26 |
| |
| for.inc26: |
| %iNewChunks.1.lcssa = phi i32 [ undef, %for.body9 ], [ %iNewChunks.2, %for.inc23 ] |
| unreachable |
| } |
| |
| ; In the test below, it's more profitable for the expression feeding the |
| ; conditional store to remain scalar. Since we can only type-shrink vector |
| ; types, we shouldn't try to represent the expression in a smaller type. |
| ; |
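| ; A rough C equivalent of the loop below, for reference only. The pointer |
| ; parameter p is hypothetical: the IR takes only the flag %c and uses an undef |
| ; base pointer. The (unsigned)/(unsigned char) casts correspond to the |
| ; zext/trunc pair feeding the conditional store. |
| ; |
| ;   void minimal_bit_widths(int c, unsigned char *p) { |
| ;     for (unsigned long i = 0, n = 1000; n != 0; i++, n--) |
| ;       if (c) |
| ;         p[i] = (unsigned char)(unsigned)p[i]; |
| ;   } |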
| define void @minimal_bit_widths(i1 %c) { |
| ; UNROLL-LABEL: @minimal_bit_widths( |
| ; UNROLL-NEXT: entry: |
| ; UNROLL-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; UNROLL: vector.body: |
| ; UNROLL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ] |
| ; UNROLL-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 |
| ; UNROLL-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 |
| ; UNROLL-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr undef, i64 [[TMP0]] |
| ; UNROLL-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr undef, i64 [[TMP1]] |
| ; UNROLL-NEXT: [[TMP4:%.*]] = load i8, ptr [[TMP2]], align 1 |
| ; UNROLL-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP3]], align 1 |
| ; UNROLL-NEXT: br i1 [[C:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE2]] |
| ; UNROLL: pred.store.if: |
| ; UNROLL-NEXT: store i8 [[TMP4]], ptr [[TMP2]], align 1 |
| ; UNROLL-NEXT: store i8 [[TMP5]], ptr [[TMP3]], align 1 |
| ; UNROLL-NEXT: br label [[PRED_STORE_CONTINUE2]] |
| ; UNROLL: pred.store.continue2: |
| ; UNROLL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 |
| ; UNROLL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 |
| ; UNROLL-NEXT: br i1 [[TMP6]], label [[FOR_END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] |
| ; UNROLL: for.end: |
| ; UNROLL-NEXT: ret void |
| ; |
| ; UNROLL-NOSIMPLIFY-LABEL: @minimal_bit_widths( |
| ; UNROLL-NOSIMPLIFY-NEXT: entry: |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; UNROLL-NOSIMPLIFY: vector.ph: |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; UNROLL-NOSIMPLIFY: vector.body: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr undef, i64 [[TMP0]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr undef, i64 [[TMP1]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP4:%.*]] = load i8, ptr [[TMP2]], align 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP3]], align 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[C:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] |
| ; UNROLL-NOSIMPLIFY: pred.store.if: |
| ; UNROLL-NOSIMPLIFY-NEXT: store i8 [[TMP4]], ptr [[TMP2]], align 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[PRED_STORE_CONTINUE]] |
| ; UNROLL-NOSIMPLIFY: pred.store.continue: |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[C]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]] |
| ; UNROLL-NOSIMPLIFY: pred.store.if1: |
| ; UNROLL-NOSIMPLIFY-NEXT: store i8 [[TMP5]], ptr [[TMP3]], align 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[PRED_STORE_CONTINUE2]] |
| ; UNROLL-NOSIMPLIFY: pred.store.continue2: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] |
| ; UNROLL-NOSIMPLIFY: middle.block: |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] |
| ; UNROLL-NOSIMPLIFY: scalar.ph: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[BC_RESUME_VAL3:%.*]] = phi i64 [ 0, [[MIDDLE_BLOCK]] ], [ 1000, [[ENTRY]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_BODY:%.*]] |
| ; UNROLL-NOSIMPLIFY: for.body: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP0:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP1:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr undef, i64 [[TMP0]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP3:%.*]] = load i8, ptr [[TMP2]], align 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]] |
| ; UNROLL-NOSIMPLIFY: if.then: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP4:%.*]] = zext i8 [[TMP3]] to i32 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8 |
| ; UNROLL-NOSIMPLIFY-NEXT: store i8 [[TMP5]], ptr [[TMP2]], align 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC]] |
| ; UNROLL-NOSIMPLIFY: for.inc: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP6]] = add nuw nsw i64 [[TMP0]], 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP7]] = add i64 [[TMP1]], -1 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] |
| ; UNROLL-NOSIMPLIFY: for.end: |
| ; UNROLL-NOSIMPLIFY-NEXT: ret void |
| ; |
| ; VEC-LABEL: @minimal_bit_widths( |
| ; VEC-NEXT: entry: |
| ; VEC-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[C:%.*]], i64 0 |
| ; VEC-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer |
| ; VEC-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; VEC: vector.body: |
| ; VEC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ] |
| ; VEC-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 |
| ; VEC-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr undef, i64 [[TMP0]] |
| ; VEC-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 0 |
| ; VEC-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i8>, ptr [[TMP2]], align 1 |
| ; VEC-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[BROADCAST_SPLAT]], i32 0 |
| ; VEC-NEXT: br i1 [[TMP3]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] |
| ; VEC: pred.store.if: |
| ; VEC-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr undef, i64 [[TMP0]] |
| ; VEC-NEXT: [[TMP5:%.*]] = extractelement <2 x i8> [[WIDE_LOAD]], i32 0 |
| ; VEC-NEXT: store i8 [[TMP5]], ptr [[TMP4]], align 1 |
| ; VEC-NEXT: br label [[PRED_STORE_CONTINUE]] |
| ; VEC: pred.store.continue: |
| ; VEC-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[BROADCAST_SPLAT]], i32 1 |
| ; VEC-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]] |
| ; VEC: pred.store.if1: |
| ; VEC-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 1 |
| ; VEC-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr undef, i64 [[TMP7]] |
| ; VEC-NEXT: [[TMP9:%.*]] = extractelement <2 x i8> [[WIDE_LOAD]], i32 1 |
| ; VEC-NEXT: store i8 [[TMP9]], ptr [[TMP8]], align 1 |
| ; VEC-NEXT: br label [[PRED_STORE_CONTINUE2]] |
| ; VEC: pred.store.continue2: |
| ; VEC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 |
| ; VEC-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 |
| ; VEC-NEXT: br i1 [[TMP10]], label [[FOR_END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] |
| ; VEC: for.end: |
| ; VEC-NEXT: ret void |
| ; |
| entry: |
| br label %for.body |
| |
| for.body: |
| %tmp0 = phi i64 [ %tmp6, %for.inc ], [ 0, %entry ] |
| %tmp1 = phi i64 [ %tmp7, %for.inc ], [ 1000, %entry ] |
| %tmp2 = getelementptr i8, ptr undef, i64 %tmp0 |
| %tmp3 = load i8, ptr %tmp2, align 1 |
| br i1 %c, label %if.then, label %for.inc |
| |
| if.then: |
| %tmp4 = zext i8 %tmp3 to i32 |
| %tmp5 = trunc i32 %tmp4 to i8 |
| store i8 %tmp5, ptr %tmp2, align 1 |
| br label %for.inc |
| |
| for.inc: |
| %tmp6 = add nuw nsw i64 %tmp0, 1 |
| %tmp7 = add i64 %tmp1, -1 |
| %tmp8 = icmp eq i64 %tmp7, 0 |
| br i1 %tmp8, label %for.end, label %for.body |
| |
| for.end: |
| ret void |
| } |
| |
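| ; Variant of @minimal_bit_widths with an extra unconditional store to the same |
| ; address as the predicated store, so the two stores alias. |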
| define void @minimal_bit_widths_with_aliasing_store(i1 %c, ptr %ptr) { |
| ; UNROLL-LABEL: @minimal_bit_widths_with_aliasing_store( |
| ; UNROLL-NEXT: entry: |
| ; UNROLL-NEXT: br label [[FOR_BODY:%.*]] |
| ; UNROLL: for.body: |
| ; UNROLL-NEXT: [[TMP0:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ] |
| ; UNROLL-NEXT: [[TMP1:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ 0, [[ENTRY]] ] |
| ; UNROLL-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i64 [[TMP0]] |
| ; UNROLL-NEXT: [[TMP3:%.*]] = load i8, ptr [[TMP2]], align 1 |
| ; UNROLL-NEXT: store i8 0, ptr [[TMP2]], align 1 |
| ; UNROLL-NEXT: br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[FOR_INC]] |
| ; UNROLL: if.then: |
| ; UNROLL-NEXT: [[TMP4:%.*]] = zext i8 [[TMP3]] to i32 |
| ; UNROLL-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8 |
| ; UNROLL-NEXT: store i8 [[TMP5]], ptr [[TMP2]], align 1 |
| ; UNROLL-NEXT: br label [[FOR_INC]] |
| ; UNROLL: for.inc: |
| ; UNROLL-NEXT: [[TMP6]] = add nuw nsw i64 [[TMP0]], 1 |
| ; UNROLL-NEXT: [[TMP7]] = add i64 [[TMP1]], -1 |
| ; UNROLL-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0 |
| ; UNROLL-NEXT: br i1 [[TMP8]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] |
| ; UNROLL: for.end: |
| ; UNROLL-NEXT: ret void |
| ; |
| ; UNROLL-NOSIMPLIFY-LABEL: @minimal_bit_widths_with_aliasing_store( |
| ; UNROLL-NOSIMPLIFY-NEXT: entry: |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 true, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; UNROLL-NOSIMPLIFY: vector.ph: |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; UNROLL-NOSIMPLIFY: vector.body: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i64 [[TMP0]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP1]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP4:%.*]] = load i8, ptr [[TMP2]], align 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP3]], align 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: store i8 0, ptr [[TMP2]], align 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: store i8 0, ptr [[TMP3]], align 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[C:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] |
| ; UNROLL-NOSIMPLIFY: pred.store.if: |
| ; UNROLL-NOSIMPLIFY-NEXT: store i8 [[TMP4]], ptr [[TMP2]], align 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[PRED_STORE_CONTINUE]] |
| ; UNROLL-NOSIMPLIFY: pred.store.continue: |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[C]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]] |
| ; UNROLL-NOSIMPLIFY: pred.store.if1: |
| ; UNROLL-NOSIMPLIFY-NEXT: store i8 [[TMP5]], ptr [[TMP3]], align 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[PRED_STORE_CONTINUE2]] |
| ; UNROLL-NOSIMPLIFY: pred.store.continue2: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 0 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] |
| ; UNROLL-NOSIMPLIFY: middle.block: |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] |
| ; UNROLL-NOSIMPLIFY: scalar.ph: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[BC_RESUME_VAL3:%.*]] = phi i64 [ 0, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_BODY:%.*]] |
| ; UNROLL-NOSIMPLIFY: for.body: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP0:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP1:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP0]] |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP3:%.*]] = load i8, ptr [[TMP2]], align 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: store i8 0, ptr [[TMP2]], align 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]] |
| ; UNROLL-NOSIMPLIFY: if.then: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP4:%.*]] = zext i8 [[TMP3]] to i32 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8 |
| ; UNROLL-NOSIMPLIFY-NEXT: store i8 [[TMP5]], ptr [[TMP2]], align 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC]] |
| ; UNROLL-NOSIMPLIFY: for.inc: |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP6]] = add nuw nsw i64 [[TMP0]], 1 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP7]] = add i64 [[TMP1]], -1 |
| ; UNROLL-NOSIMPLIFY-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0 |
| ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] |
| ; UNROLL-NOSIMPLIFY: for.end: |
| ; UNROLL-NOSIMPLIFY-NEXT: ret void |
| ; |
| ; VEC-LABEL: @minimal_bit_widths_with_aliasing_store( |
| ; VEC-NEXT: entry: |
| ; VEC-NEXT: br label [[FOR_BODY:%.*]] |
| ; VEC: for.body: |
| ; VEC-NEXT: [[TMP0:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ] |
| ; VEC-NEXT: [[TMP1:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ 0, [[ENTRY]] ] |
| ; VEC-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i64 [[TMP0]] |
| ; VEC-NEXT: [[TMP3:%.*]] = load i8, ptr [[TMP2]], align 1 |
| ; VEC-NEXT: store i8 0, ptr [[TMP2]], align 1 |
| ; VEC-NEXT: br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[FOR_INC]] |
| ; VEC: if.then: |
| ; VEC-NEXT: [[TMP4:%.*]] = zext i8 [[TMP3]] to i32 |
| ; VEC-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8 |
| ; VEC-NEXT: store i8 [[TMP5]], ptr [[TMP2]], align 1 |
| ; VEC-NEXT: br label [[FOR_INC]] |
| ; VEC: for.inc: |
| ; VEC-NEXT: [[TMP6]] = add nuw nsw i64 [[TMP0]], 1 |
| ; VEC-NEXT: [[TMP7]] = add i64 [[TMP1]], -1 |
| ; VEC-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0 |
| ; VEC-NEXT: br i1 [[TMP8]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] |
| ; VEC: for.end: |
| ; VEC-NEXT: ret void |
| ; |
| entry: |
| br label %for.body |
| |
| for.body: |
| %tmp0 = phi i64 [ %tmp6, %for.inc ], [ 0, %entry ] |
| %tmp1 = phi i64 [ %tmp7, %for.inc ], [ 0, %entry ] |
| %tmp2 = getelementptr i8, ptr %ptr, i64 %tmp0 |
| %tmp3 = load i8, ptr %tmp2, align 1 |
| store i8 0, ptr %tmp2 |
| br i1 %c, label %if.then, label %for.inc |
| |
| if.then: |
| %tmp4 = zext i8 %tmp3 to i32 |
| %tmp5 = trunc i32 %tmp4 to i8 |
| store i8 %tmp5, ptr %tmp2, align 1 |
| br label %for.inc |
| |
| for.inc: |
| %tmp6 = add nuw nsw i64 %tmp0, 1 |
| %tmp7 = add i64 %tmp1, -1 |
| %tmp8 = icmp eq i64 %tmp7, 0 |
| br i1 %tmp8, label %for.end, label %for.body |
| |
| for.end: |
| ret void |
| } |