| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py |
| ; RUN: opt -passes=loop-vectorize \ |
| ; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ |
| ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=IF-EVL |
| |
| ; RUN: opt -passes=loop-vectorize \ |
| ; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ |
| ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=NO-VP |
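; IF-EVL exercises tail folding with EVL, where memory accesses are lowered to
; VP intrinsics; NO-VP exercises plain scalable vectorization with a scalar
; epilogue.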
| |
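; Reverse (decrementing-address) load/store loop with a trip count of 1024.
; With IF-EVL the accesses become @llvm.vp.load/@llvm.vp.store combined with
; @llvm.experimental.vp.reverse, using the element count returned by
; @llvm.experimental.get.vector.length. With NO-VP the reverse accesses are
; lowered to @llvm.vector.reverse plus ordinary wide loads and stores.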
| define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %ptr2) { |
| ; IF-EVL-LABEL: @reverse_load_store( |
| ; IF-EVL-NEXT: entry: |
| ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; IF-EVL: vector.ph: |
| ; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() |
| ; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 |
| ; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 |
| ; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]] |
| ; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] |
| ; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] |
| ; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() |
| ; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 |
| ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; IF-EVL: vector.body: |
| ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) |
| ; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL:%.*]], [[EVL_BASED_IV]] |
| ; IF-EVL-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], -1 |
| ; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[TMP7]] |
| ; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP5]] to i64 |
| ; IF-EVL-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP18]] |
| ; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[TMP18]], 1 |
| ; IF-EVL-NEXT: [[TMP10:%.*]] = mul i64 -1, [[TMP11]] |
| ; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP8]], i64 [[TMP9]] |
| ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[TMP16]], i64 [[TMP10]] |
| ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]]) |
| ; IF-EVL-NEXT: [[VP_REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]]) |
| ; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[PTR2:%.*]], i64 [[TMP7]] |
| ; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP5]] to i64 |
| ; IF-EVL-NEXT: [[TMP14:%.*]] = mul i64 0, [[TMP19]] |
| ; IF-EVL-NEXT: [[TMP23:%.*]] = sub i64 [[TMP19]], 1 |
| ; IF-EVL-NEXT: [[TMP15:%.*]] = mul i64 -1, [[TMP23]] |
| ; IF-EVL-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP13]], i64 [[TMP14]] |
| ; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[TMP22]], i64 [[TMP15]] |
| ; IF-EVL-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_REVERSE]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]]) |
| ; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE3]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]]) |
| ; IF-EVL-NEXT: [[TMP20:%.*]] = zext i32 [[TMP5]] to i64 |
| ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP20]], [[EVL_BASED_IV]] |
| ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]] |
| ; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024 |
| ; IF-EVL-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] |
| ; IF-EVL: middle.block: |
| ; IF-EVL-NEXT: br label [[LOOPEND:%.*]] |
| ; IF-EVL: scalar.ph: |
| ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[STARTVAL]], [[ENTRY:%.*]] ] |
| ; IF-EVL-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i32 [ 0, [[ENTRY]] ] |
| ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] |
| ; IF-EVL: for.body: |
| ; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] |
| ; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] |
| ; IF-EVL-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 |
| ; IF-EVL-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[ADD]] |
| ; IF-EVL-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 |
| ; IF-EVL-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2]], i64 [[ADD]] |
| ; IF-EVL-NEXT: store i32 [[TMP]], ptr [[GEPS]], align 4 |
| ; IF-EVL-NEXT: [[INC]] = add i32 [[I]], 1 |
| ; IF-EVL-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024 |
| ; IF-EVL-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND]], !llvm.loop [[LOOP4:![0-9]+]] |
| ; IF-EVL: loopend: |
| ; IF-EVL-NEXT: ret void |
| ; |
| ; NO-VP-LABEL: @reverse_load_store( |
| ; NO-VP-NEXT: entry: |
| ; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 |
| ; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] |
| ; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; NO-VP: vector.ph: |
| ; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 |
| ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] |
| ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] |
| ; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 |
| ; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[STARTVAL:%.*]], [[N_VEC]] |
| ; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32 |
| ; NO-VP-NEXT: br label [[FOR_BODY:%.*]] |
| ; NO-VP: vector.body: |
| ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ] |
| ; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL]], [[INDEX]] |
| ; NO-VP-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], -1 |
| ; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[TMP8]] |
| ; NO-VP-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP5]] |
| ; NO-VP-NEXT: [[TMP11:%.*]] = sub i64 [[TMP5]], 1 |
| ; NO-VP-NEXT: [[TMP12:%.*]] = mul i64 -1, [[TMP11]] |
| ; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i64 [[TMP10]] |
| ; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i64 [[TMP12]] |
| ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4 |
| ; NO-VP-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) |
| ; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[PTR2:%.*]], i64 [[TMP8]] |
| ; NO-VP-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]] |
| ; NO-VP-NEXT: [[TMP17:%.*]] = sub i64 [[TMP5]], 1 |
| ; NO-VP-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP17]] |
| ; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[TMP16]] |
| ; NO-VP-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP19]], i64 [[TMP18]] |
| ; NO-VP-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[REVERSE]]) |
| ; NO-VP-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP20]], align 4 |
| ; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] |
| ; NO-VP-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] |
| ; NO-VP-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] |
| ; NO-VP: middle.block: |
| ; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] |
| ; NO-VP-NEXT: br i1 [[CMP_N]], label [[LOOPEND:%.*]], label [[SCALAR_PH]] |
| ; NO-VP: scalar.ph: |
| ; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[STARTVAL]], [[ENTRY:%.*]] ] |
| ; NO-VP-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i32 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] |
| ; NO-VP-NEXT: br label [[FOR_BODY1:%.*]] |
| ; NO-VP: for.body: |
| ; NO-VP-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY1]] ] |
| ; NO-VP-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY1]] ] |
| ; NO-VP-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 |
| ; NO-VP-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[ADD]] |
| ; NO-VP-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 |
| ; NO-VP-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2]], i64 [[ADD]] |
| ; NO-VP-NEXT: store i32 [[TMP]], ptr [[GEPS]], align 4 |
| ; NO-VP-NEXT: [[INC]] = add i32 [[I]], 1 |
| ; NO-VP-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024 |
| ; NO-VP-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY1]], label [[LOOPEND]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; NO-VP: loopend: |
| ; NO-VP-NEXT: ret void |
| ; |
| entry: |
| br label %for.body |
| |
| for.body: |
| %add.phi = phi i64 [ %startval, %entry ], [ %add, %for.body ] |
| %i = phi i32 [ 0, %entry ], [ %inc, %for.body ] |
| %add = add i64 %add.phi, -1 |
| %gepl = getelementptr inbounds i32, ptr %ptr, i64 %add |
| %tmp = load i32, ptr %gepl, align 4 |
| %geps = getelementptr inbounds i32, ptr %ptr2, i64 %add |
| store i32 %tmp, ptr %geps, align 4 |
| %inc = add i32 %i, 1 |
| %exitcond = icmp ne i32 %inc, 1024 |
| br i1 %exitcond, label %for.body, label %loopend |
| |
| loopend: |
| ret void |
| } |
| |
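; Same reverse loop, but the load from %ptr1 and the store to %ptr2 are guarded
; by a comparison on the value loaded from %ptr, so the reverse accesses must
; be masked. IF-EVL reverses the mask with @llvm.experimental.vp.reverse.nxv4i1;
; NO-VP uses @llvm.vector.reverse.nxv4i1 together with masked load/store
; intrinsics.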
| define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noalias %ptr1, ptr noalias %ptr2) { |
| ; IF-EVL-LABEL: @reverse_load_store_masked( |
| ; IF-EVL-NEXT: entry: |
| ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; IF-EVL: vector.ph: |
| ; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() |
| ; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 |
| ; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 |
| ; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]] |
| ; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] |
| ; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] |
| ; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() |
| ; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 |
| ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; IF-EVL: vector.body: |
| ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) |
| ; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL:%.*]], [[EVL_BASED_IV]] |
| ; IF-EVL-NEXT: [[OFFSET_IDX3:%.*]] = trunc i64 [[EVL_BASED_IV]] to i32 |
| ; IF-EVL-NEXT: [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], -1 |
| ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i32 [[OFFSET_IDX3]] |
| ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]]) |
| ; IF-EVL-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 4 x i32> [[VP_OP_LOAD]], splat (i32 100) |
| ; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[PTR1:%.*]], i64 [[TMP11]] |
| ; IF-EVL-NEXT: [[TMP26:%.*]] = zext i32 [[TMP5]] to i64 |
| ; IF-EVL-NEXT: [[TMP17:%.*]] = mul i64 0, [[TMP26]] |
| ; IF-EVL-NEXT: [[TMP15:%.*]] = sub i64 [[TMP26]], 1 |
| ; IF-EVL-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP15]] |
| ; IF-EVL-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[TMP16]], i64 [[TMP17]] |
| ; IF-EVL-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[TMP19]], i64 [[TMP18]] |
| ; IF-EVL-NEXT: [[VP_REVERSE_MASK:%.*]] = call <vscale x 4 x i1> @llvm.experimental.vp.reverse.nxv4i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]]) |
| ; IF-EVL-NEXT: [[VP_OP_LOAD4:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP20]], <vscale x 4 x i1> [[VP_REVERSE_MASK]], i32 [[TMP5]]) |
| ; IF-EVL-NEXT: [[VP_REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD4]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]]) |
| ; IF-EVL-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[PTR2:%.*]], i64 [[TMP11]] |
| ; IF-EVL-NEXT: [[TMP27:%.*]] = zext i32 [[TMP5]] to i64 |
| ; IF-EVL-NEXT: [[TMP22:%.*]] = mul i64 0, [[TMP27]] |
| ; IF-EVL-NEXT: [[TMP30:%.*]] = sub i64 [[TMP27]], 1 |
| ; IF-EVL-NEXT: [[TMP23:%.*]] = mul i64 -1, [[TMP30]] |
| ; IF-EVL-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[TMP21]], i64 [[TMP22]] |
| ; IF-EVL-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP24]], i64 [[TMP23]] |
| ; IF-EVL-NEXT: [[VP_REVERSE5:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_REVERSE]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]]) |
| ; IF-EVL-NEXT: [[VP_REVERSE_MASK6:%.*]] = call <vscale x 4 x i1> @llvm.experimental.vp.reverse.nxv4i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]]) |
| ; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE5]], ptr align 4 [[TMP25]], <vscale x 4 x i1> [[VP_REVERSE_MASK6]], i32 [[TMP5]]) |
| ; IF-EVL-NEXT: [[TMP28:%.*]] = zext i32 [[TMP5]] to i64 |
| ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP28]], [[EVL_BASED_IV]] |
| ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP28]] |
| ; IF-EVL-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024 |
| ; IF-EVL-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] |
| ; IF-EVL: middle.block: |
| ; IF-EVL-NEXT: br label [[LOOPEND:%.*]] |
| ; IF-EVL: scalar.ph: |
| ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[STARTVAL]], [[ENTRY:%.*]] ] |
| ; IF-EVL-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i32 [ 0, [[ENTRY]] ] |
| ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] |
| ; IF-EVL: for.body: |
| ; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_INC:%.*]] ] |
| ; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_INC]] ] |
| ; IF-EVL-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 |
| ; IF-EVL-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i32 [[I]] |
| ; IF-EVL-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 |
| ; IF-EVL-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP]], 100 |
| ; IF-EVL-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]] |
| ; IF-EVL: if.then: |
| ; IF-EVL-NEXT: [[GEPL1:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i64 [[ADD]] |
| ; IF-EVL-NEXT: [[V:%.*]] = load i32, ptr [[GEPL1]], align 4 |
| ; IF-EVL-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2]], i64 [[ADD]] |
| ; IF-EVL-NEXT: store i32 [[V]], ptr [[GEPS]], align 4 |
| ; IF-EVL-NEXT: br label [[FOR_INC]] |
| ; IF-EVL: for.inc: |
| ; IF-EVL-NEXT: [[INC]] = add i32 [[I]], 1 |
| ; IF-EVL-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024 |
| ; IF-EVL-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND]], !llvm.loop [[LOOP6:![0-9]+]] |
| ; IF-EVL: loopend: |
| ; IF-EVL-NEXT: ret void |
| ; |
| ; NO-VP-LABEL: @reverse_load_store_masked( |
| ; NO-VP-NEXT: entry: |
| ; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 |
| ; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] |
| ; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[ENTRY:%.*]], label [[VECTOR_PH:%.*]] |
| ; NO-VP: vector.ph: |
| ; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 |
| ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] |
| ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] |
| ; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() |
| ; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 |
| ; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[STARTVAL1:%.*]], [[N_VEC]] |
| ; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32 |
| ; NO-VP-NEXT: br label [[FOR_BODY:%.*]] |
| ; NO-VP: vector.body: |
| ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ] |
| ; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL1]], [[INDEX]] |
| ; NO-VP-NEXT: [[OFFSET_IDX1:%.*]] = trunc i64 [[INDEX]] to i32 |
| ; NO-VP-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], -1 |
| ; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i32 [[OFFSET_IDX1]] |
| ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4 |
| ; NO-VP-NEXT: [[TMP10:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 100) |
| ; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[PTR1:%.*]], i64 [[TMP8]] |
| ; NO-VP-NEXT: [[TMP12:%.*]] = mul i64 0, [[TMP5]] |
| ; NO-VP-NEXT: [[TMP13:%.*]] = sub i64 [[TMP5]], 1 |
| ; NO-VP-NEXT: [[TMP14:%.*]] = mul i64 -1, [[TMP13]] |
| ; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP11]], i64 [[TMP12]] |
| ; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP15]], i64 [[TMP14]] |
| ; NO-VP-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[TMP10]]) |
| ; NO-VP-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP16]], i32 4, <vscale x 4 x i1> [[REVERSE]], <vscale x 4 x i32> poison) |
| ; NO-VP-NEXT: [[REVERSE2:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]]) |
| ; NO-VP-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[PTR2:%.*]], i64 [[TMP8]] |
| ; NO-VP-NEXT: [[TMP18:%.*]] = mul i64 0, [[TMP5]] |
| ; NO-VP-NEXT: [[TMP19:%.*]] = sub i64 [[TMP5]], 1 |
| ; NO-VP-NEXT: [[TMP20:%.*]] = mul i64 -1, [[TMP19]] |
| ; NO-VP-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[TMP17]], i64 [[TMP18]] |
| ; NO-VP-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP21]], i64 [[TMP20]] |
| ; NO-VP-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[TMP10]]) |
| ; NO-VP-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[REVERSE2]]) |
| ; NO-VP-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[REVERSE4]], ptr [[TMP22]], i32 4, <vscale x 4 x i1> [[REVERSE3]]) |
| ; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] |
| ; NO-VP-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] |
| ; NO-VP-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] |
| ; NO-VP: middle.block: |
| ; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] |
| ; NO-VP-NEXT: br i1 [[CMP_N]], label [[LOOPEND:%.*]], label [[ENTRY]] |
| ; NO-VP: scalar.ph: |
| ; NO-VP-NEXT: [[STARTVAL:%.*]] = phi i64 [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[STARTVAL1]], [[ENTRY1:%.*]] ] |
| ; NO-VP-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1]] ] |
| ; NO-VP-NEXT: br label [[FOR_BODY1:%.*]] |
| ; NO-VP: for.body: |
| ; NO-VP-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_INC:%.*]] ] |
| ; NO-VP-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL5]], [[ENTRY]] ], [ [[INC:%.*]], [[FOR_INC]] ] |
| ; NO-VP-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 |
| ; NO-VP-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i32 [[I]] |
| ; NO-VP-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 |
| ; NO-VP-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP]], 100 |
| ; NO-VP-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]] |
| ; NO-VP: if.then: |
| ; NO-VP-NEXT: [[GEPL1:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i64 [[ADD]] |
| ; NO-VP-NEXT: [[V:%.*]] = load i32, ptr [[GEPL1]], align 4 |
| ; NO-VP-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2]], i64 [[ADD]] |
| ; NO-VP-NEXT: store i32 [[V]], ptr [[GEPS]], align 4 |
| ; NO-VP-NEXT: br label [[FOR_INC]] |
| ; NO-VP: for.inc: |
| ; NO-VP-NEXT: [[INC]] = add i32 [[I]], 1 |
| ; NO-VP-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024 |
| ; NO-VP-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY1]], label [[LOOPEND]], !llvm.loop [[LOOP5:![0-9]+]] |
| ; NO-VP: loopend: |
| ; NO-VP-NEXT: ret void |
| ; |
| entry: |
| br label %for.body |
| |
| for.body: |
| %add.phi = phi i64 [ %startval, %entry ], [ %add, %for.inc ] |
| %i = phi i32 [ 0, %entry ], [ %inc, %for.inc ] |
| %add = add i64 %add.phi, -1 |
| %gepl = getelementptr inbounds i32, ptr %ptr, i32 %i |
| %tmp = load i32, ptr %gepl, align 4 |
| %cmp1 = icmp slt i32 %tmp, 100 |
| br i1 %cmp1, label %if.then, label %for.inc |
| |
| if.then: |
| %gepl1 = getelementptr inbounds i32, ptr %ptr1, i64 %add |
| %v = load i32, ptr %gepl1, align 4 |
| %geps = getelementptr inbounds i32, ptr %ptr2, i64 %add |
| store i32 %v, ptr %geps, align 4 |
| br label %for.inc |
| |
| for.inc: |
| %inc = add i32 %i, 1 |
| %exitcond = icmp ne i32 %inc, 1024 |
| br i1 %exitcond, label %for.body, label %loopend |
| |
| loopend: |
| ret void |
| } |
| |
| ; From a miscompile originally reported at |
| ; https://github.com/llvm/llvm-project/issues/122681 |
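; The loop reads %a in reverse, uses the loaded byte to index %b (vectorized as
; a gather), and stores the result to both %c and %d in reverse, so several
; reverse vector pointers are live in a single vector iteration.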
| |
| define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr noalias %c, ptr noalias %d) { |
| ; IF-EVL-LABEL: @multiple_reverse_vector_pointer( |
| ; IF-EVL-NEXT: entry: |
| ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; IF-EVL: vector.ph: |
| ; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() |
| ; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 |
| ; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 |
| ; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]] |
| ; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] |
| ; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] |
| ; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() |
| ; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16 |
| ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; IF-EVL: vector.body: |
| ; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true) |
| ; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1024, [[EVL_BASED_IV]] |
| ; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[OFFSET_IDX]] |
| ; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP6]] to i64 |
| ; IF-EVL-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP9]] |
| ; IF-EVL-NEXT: [[TMP29:%.*]] = sub i64 [[TMP9]], 1 |
| ; IF-EVL-NEXT: [[TMP11:%.*]] = mul i64 -1, [[TMP29]] |
| ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP8]], i64 [[TMP10]] |
| ; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP12]], i64 [[TMP11]] |
| ; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]]) |
| ; IF-EVL-NEXT: [[VP_REVERSE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vp.reverse.nxv16i8(<vscale x 16 x i8> [[VP_OP_LOAD]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]]) |
| ; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[B:%.*]], <vscale x 16 x i8> [[VP_REVERSE]] |
| ; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP14]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]]) |
| ; IF-EVL-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[OFFSET_IDX]] |
| ; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP6]] to i64 |
| ; IF-EVL-NEXT: [[TMP17:%.*]] = mul i64 0, [[TMP16]] |
| ; IF-EVL-NEXT: [[TMP30:%.*]] = sub i64 [[TMP16]], 1 |
| ; IF-EVL-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP30]] |
| ; IF-EVL-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[TMP15]], i64 [[TMP17]] |
| ; IF-EVL-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[TMP19]], i64 [[TMP18]] |
| ; IF-EVL-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vp.reverse.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]]) |
| ; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_REVERSE1]], ptr align 1 [[TMP20]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]]) |
| ; IF-EVL-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[D:%.*]], i64 [[OFFSET_IDX]] |
| ; IF-EVL-NEXT: [[TMP22:%.*]] = zext i32 [[TMP6]] to i64 |
| ; IF-EVL-NEXT: [[TMP23:%.*]] = mul i64 0, [[TMP22]] |
| ; IF-EVL-NEXT: [[TMP31:%.*]] = sub i64 [[TMP22]], 1 |
| ; IF-EVL-NEXT: [[TMP24:%.*]] = mul i64 -1, [[TMP31]] |
| ; IF-EVL-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP21]], i64 [[TMP23]] |
| ; IF-EVL-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[TMP25]], i64 [[TMP24]] |
| ; IF-EVL-NEXT: [[VP_REVERSE2:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vp.reverse.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]]) |
| ; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_REVERSE2]], ptr align 1 [[TMP26]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]]) |
| ; IF-EVL-NEXT: [[TMP27:%.*]] = zext i32 [[TMP6]] to i64 |
| ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP27]], [[EVL_BASED_IV]] |
| ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP27]] |
| ; IF-EVL-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025 |
| ; IF-EVL-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] |
| ; IF-EVL: middle.block: |
| ; IF-EVL-NEXT: br label [[EXIT:%.*]] |
| ; IF-EVL: scalar.ph: |
| ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, [[ENTRY:%.*]] ] |
| ; IF-EVL-NEXT: br label [[LOOP:%.*]] |
| ; IF-EVL: loop: |
| ; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 1024, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] |
| ; IF-EVL-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]] |
| ; IF-EVL-NEXT: [[X:%.*]] = load i8, ptr [[GEP_A]], align 1 |
| ; IF-EVL-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B]], i8 [[X]] |
| ; IF-EVL-NEXT: [[Y:%.*]] = load i8, ptr [[GEP_B]], align 1 |
| ; IF-EVL-NEXT: [[GEP_C:%.*]] = getelementptr i8, ptr [[C]], i64 [[IV]] |
| ; IF-EVL-NEXT: store i8 [[Y]], ptr [[GEP_C]], align 1 |
| ; IF-EVL-NEXT: [[GEP_D:%.*]] = getelementptr i8, ptr [[D]], i64 [[IV]] |
| ; IF-EVL-NEXT: store i8 [[Y]], ptr [[GEP_D]], align 1 |
| ; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], -1 |
| ; IF-EVL-NEXT: [[CMP_NOT:%.*]] = icmp eq i64 [[IV]], 0 |
| ; IF-EVL-NEXT: br i1 [[CMP_NOT]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] |
| ; IF-EVL: exit: |
| ; IF-EVL-NEXT: ret void |
| ; |
| ; NO-VP-LABEL: @multiple_reverse_vector_pointer( |
| ; NO-VP-NEXT: entry: |
| ; NO-VP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; NO-VP: vector.ph: |
| ; NO-VP-NEXT: br label [[LOOP:%.*]] |
| ; NO-VP: vector.body: |
| ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ] |
| ; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1024, [[INDEX]] |
| ; NO-VP-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[OFFSET_IDX]] |
| ; NO-VP-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 0 |
| ; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 -15 |
| ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 |
| ; NO-VP-NEXT: [[REVERSE:%.*]] = shufflevector <16 x i8> [[WIDE_LOAD]], <16 x i8> poison, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0> |
| ; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[B:%.*]], <16 x i8> [[REVERSE]] |
| ; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> [[TMP3]], i32 1, <16 x i1> splat (i1 true), <16 x i8> poison) |
| ; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[OFFSET_IDX]] |
| ; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i32 0 |
| ; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i32 -15 |
| ; NO-VP-NEXT: [[REVERSE1:%.*]] = shufflevector <16 x i8> [[WIDE_MASKED_GATHER]], <16 x i8> poison, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0> |
| ; NO-VP-NEXT: store <16 x i8> [[REVERSE1]], ptr [[TMP6]], align 1 |
| ; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[D:%.*]], i64 [[OFFSET_IDX]] |
| ; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i32 0 |
| ; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i32 -15 |
| ; NO-VP-NEXT: store <16 x i8> [[REVERSE1]], ptr [[TMP9]], align 1 |
| ; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 |
| ; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 |
| ; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]] |
| ; NO-VP: middle.block: |
| ; NO-VP-NEXT: br label [[SCALAR_PH]] |
| ; NO-VP: scalar.ph: |
| ; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[MIDDLE_BLOCK]] ], [ 1024, [[ENTRY:%.*]] ] |
| ; NO-VP-NEXT: br label [[LOOP1:%.*]] |
| ; NO-VP: loop: |
| ; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP1]] ] |
| ; NO-VP-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]] |
| ; NO-VP-NEXT: [[X:%.*]] = load i8, ptr [[GEP_A]], align 1 |
| ; NO-VP-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B]], i8 [[X]] |
| ; NO-VP-NEXT: [[Y:%.*]] = load i8, ptr [[GEP_B]], align 1 |
| ; NO-VP-NEXT: [[GEP_C:%.*]] = getelementptr i8, ptr [[C]], i64 [[IV]] |
| ; NO-VP-NEXT: store i8 [[Y]], ptr [[GEP_C]], align 1 |
| ; NO-VP-NEXT: [[GEP_D:%.*]] = getelementptr i8, ptr [[D]], i64 [[IV]] |
| ; NO-VP-NEXT: store i8 [[Y]], ptr [[GEP_D]], align 1 |
| ; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], -1 |
| ; NO-VP-NEXT: [[CMP_NOT:%.*]] = icmp eq i64 [[IV]], 0 |
| ; NO-VP-NEXT: br i1 [[CMP_NOT]], label [[EXIT:%.*]], label [[LOOP1]], !llvm.loop [[LOOP7:![0-9]+]] |
| ; NO-VP: exit: |
| ; NO-VP-NEXT: ret void |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %iv = phi i64 [ 1024, %entry ], [ %iv.next, %loop ] |
| |
| %gep.a = getelementptr i8, ptr %a, i64 %iv |
| %x = load i8, ptr %gep.a |
| |
| %gep.b = getelementptr i8, ptr %b, i8 %x |
| %y = load i8, ptr %gep.b |
| |
| %gep.c = getelementptr i8, ptr %c, i64 %iv |
| store i8 %y, ptr %gep.c |
| |
| %gep.d = getelementptr i8, ptr %d, i64 %iv |
| store i8 %y, ptr %gep.d |
| |
| %iv.next = add i64 %iv, -1 |
| %cmp.not = icmp eq i64 %iv, 0 |
| br i1 %cmp.not, label %exit, label %loop |
| |
| exit: |
| ret void |
| } |