; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes=loop-vectorize \
; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=IF-EVL
; RUN: opt -passes=loop-vectorize \
; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=NO-VP
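
; Exercise widening and narrowing cast operations through the loop
; vectorizer on RISC-V. With EVL tail folding (IF-EVL) each cast is
; applied to the result of a @llvm.vp.load and written back with a
; @llvm.vp.store; with a forced scalar epilogue (NO-VP) ordinary
; scalable-vector loads and stores are emitted and the remainder
; iterations run in the scalar loop.

; Widening sext i32 -> i64.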
define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_sext(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; IF-EVL: [[VECTOR_MEMCHECK]]:
; IF-EVL-NEXT: [[TMP5:%.*]] = shl i64 [[N]], 3
; IF-EVL-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP5]]
; IF-EVL-NEXT: [[TMP6:%.*]] = shl i64 [[N]], 2
; IF-EVL-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP6]]
; IF-EVL-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
; IF-EVL-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; IF-EVL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr align 4 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META0:![0-9]+]]
; IF-EVL-NEXT: [[TMP16:%.*]] = sext <vscale x 2 x i32> [[VP_OP_LOAD]] to <vscale x 2 x i64>
; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP16]], ptr align 8 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV2:%.*]] = sext i32 [[TMP21]] to i64
; IF-EVL-NEXT: [[GEP4:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: store i64 [[CONV2]], ptr [[GEP4]], align 8
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
; NO-VP-LABEL: define void @vp_sext(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP9]], 1
; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 20)
; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; NO-VP: [[VECTOR_MEMCHECK]]:
; NO-VP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 3
; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
; NO-VP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 2
; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]]
; NO-VP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
; NO-VP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; NO-VP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; NO-VP-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; NO-VP: [[VECTOR_PH]]:
; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i32>, ptr [[GEP]], align 4, !alias.scope [[META0:![0-9]+]]
; NO-VP-NEXT: [[TMP10:%.*]] = sext <vscale x 2 x i32> [[WIDE_LOAD]] to <vscale x 2 x i64>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; NO-VP: [[SCALAR_PH]]:
; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV2:%.*]] = sext i32 [[TMP0]] to i64
; NO-VP-NEXT: [[GEP4:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store i64 [[CONV2]], ptr [[GEP4]], align 8
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
entry:
br label %loop
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
%gep = getelementptr inbounds i32, ptr %b, i64 %iv
%0 = load i32, ptr %gep, align 4
%conv2 = sext i32 %0 to i64
%gep4 = getelementptr inbounds i64, ptr %a, i64 %iv
store i64 %conv2, ptr %gep4, align 8
%iv.next = add nuw nsw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %N
br i1 %exitcond.not, label %exit, label %loop
exit:
ret void
}
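
; Widening zext i32 -> i64.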
define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_zext(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; IF-EVL: [[VECTOR_MEMCHECK]]:
; IF-EVL-NEXT: [[TMP5:%.*]] = shl i64 [[N]], 3
; IF-EVL-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP5]]
; IF-EVL-NEXT: [[TMP6:%.*]] = shl i64 [[N]], 2
; IF-EVL-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP6]]
; IF-EVL-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
; IF-EVL-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; IF-EVL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr align 4 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META9:![0-9]+]]
; IF-EVL-NEXT: [[TMP16:%.*]] = zext <vscale x 2 x i32> [[VP_OP_LOAD]] to <vscale x 2 x i64>
; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP16]], ptr align 8 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META12:![0-9]+]], !noalias [[META9]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV:%.*]] = zext i32 [[TMP21]] to i64
; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: store i64 [[CONV]], ptr [[GEP2]], align 8
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
; NO-VP-LABEL: define void @vp_zext(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP9]], 1
; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 20)
; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; NO-VP: [[VECTOR_MEMCHECK]]:
; NO-VP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 3
; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
; NO-VP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 2
; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]]
; NO-VP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
; NO-VP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; NO-VP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; NO-VP-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; NO-VP: [[VECTOR_PH]]:
; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i32>, ptr [[GEP]], align 4, !alias.scope [[META9:![0-9]+]]
; NO-VP-NEXT: [[TMP10:%.*]] = zext <vscale x 2 x i32> [[WIDE_LOAD]] to <vscale x 2 x i64>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8, !alias.scope [[META12:![0-9]+]], !noalias [[META9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; NO-VP: [[SCALAR_PH]]:
; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV:%.*]] = zext i32 [[TMP0]] to i64
; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store i64 [[CONV]], ptr [[GEP2]], align 8
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%gep = getelementptr inbounds i32, ptr %b, i64 %iv
%0 = load i32, ptr %gep, align 4
%conv = zext i32 %0 to i64
%gep2 = getelementptr inbounds i64, ptr %a, i64 %iv
store i64 %conv, ptr %gep2, align 8
%iv.next = add nuw nsw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %N
br i1 %exitcond.not, label %exit, label %loop
exit:
ret void
}
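
; Narrowing trunc i64 -> i32.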
define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_trunc(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; IF-EVL: [[VECTOR_MEMCHECK]]:
; IF-EVL-NEXT: [[TMP5:%.*]] = shl i64 [[N]], 2
; IF-EVL-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP5]]
; IF-EVL-NEXT: [[TMP6:%.*]] = shl i64 [[N]], 3
; IF-EVL-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP6]]
; IF-EVL-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
; IF-EVL-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; IF-EVL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META16:![0-9]+]]
; IF-EVL-NEXT: [[TMP16:%.*]] = trunc <vscale x 2 x i64> [[VP_OP_LOAD]] to <vscale x 2 x i32>
; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP16]], ptr align 4 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META19:![0-9]+]], !noalias [[META16]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load i64, ptr [[GEP]], align 8
; IF-EVL-NEXT: [[CONV:%.*]] = trunc i64 [[TMP21]] to i32
; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: store i32 [[CONV]], ptr [[GEP2]], align 4
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP22:![0-9]+]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
; NO-VP-LABEL: define void @vp_trunc(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP9]], 1
; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 20)
; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; NO-VP: [[VECTOR_MEMCHECK]]:
; NO-VP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 2
; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
; NO-VP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 3
; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]]
; NO-VP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
; NO-VP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; NO-VP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; NO-VP-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; NO-VP: [[VECTOR_PH]]:
; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[GEP]], align 8, !alias.scope [[META16:![0-9]+]]
; NO-VP-NEXT: [[TMP10:%.*]] = trunc <vscale x 2 x i64> [[WIDE_LOAD]] to <vscale x 2 x i32>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x i32> [[TMP10]], ptr [[TMP11]], align 4, !alias.scope [[META19:![0-9]+]], !noalias [[META16]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; NO-VP: [[SCALAR_PH]]:
; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV1]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[GEP1]], align 8
; NO-VP-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store i32 [[CONV]], ptr [[GEP2]], align 4
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP22:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%gep = getelementptr inbounds i64, ptr %b, i64 %iv
%0 = load i64, ptr %gep, align 8
%conv = trunc i64 %0 to i32
%gep2 = getelementptr inbounds i32, ptr %a, i64 %iv
store i32 %conv, ptr %gep2, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %N
br i1 %exitcond.not, label %exit, label %loop
exit:
ret void
}
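
; Widening fpext float -> double.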
define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_fpext(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; IF-EVL: [[VECTOR_MEMCHECK]]:
; IF-EVL-NEXT: [[TMP5:%.*]] = shl i64 [[N]], 3
; IF-EVL-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP5]]
; IF-EVL-NEXT: [[TMP6:%.*]] = shl i64 [[N]], 2
; IF-EVL-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP6]]
; IF-EVL-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
; IF-EVL-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; IF-EVL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(ptr align 4 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META23:![0-9]+]]
; IF-EVL-NEXT: [[TMP16:%.*]] = fpext <vscale x 2 x float> [[VP_OP_LOAD]] to <vscale x 2 x double>
; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2f64.p0(<vscale x 2 x double> [[TMP16]], ptr align 8 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META26:![0-9]+]], !noalias [[META23]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV:%.*]] = fpext float [[TMP21]] to double
; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: store double [[CONV]], ptr [[GEP2]], align 8
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP29:![0-9]+]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
; NO-VP-LABEL: define void @vp_fpext(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP7]], 1
; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 16)
; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; NO-VP: [[VECTOR_MEMCHECK]]:
; NO-VP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 3
; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
; NO-VP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 2
; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]]
; NO-VP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
; NO-VP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; NO-VP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; NO-VP-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; NO-VP: [[VECTOR_PH]]:
; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x float>, ptr [[GEP]], align 4, !alias.scope [[META23:![0-9]+]]
; NO-VP-NEXT: [[TMP10:%.*]] = fpext <vscale x 2 x float> [[WIDE_LOAD]] to <vscale x 2 x double>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x double> [[TMP10]], ptr [[TMP11]], align 8, !alias.scope [[META26:![0-9]+]], !noalias [[META23]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; NO-VP: [[SCALAR_PH]]:
; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]]
; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to double
; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store double [[CONV]], ptr [[GEP2]], align 8
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP29:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%gep = getelementptr inbounds float, ptr %b, i64 %iv
%0 = load float, ptr %gep, align 4
%conv = fpext float %0 to double
%gep2 = getelementptr inbounds double, ptr %a, i64 %iv
store double %conv, ptr %gep2, align 8
%iv.next = add nuw nsw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %N
br i1 %exitcond.not, label %exit, label %loop
exit:
ret void
}
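
; Narrowing fptrunc double -> float.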
define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_fptrunc(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; IF-EVL: [[VECTOR_MEMCHECK]]:
; IF-EVL-NEXT: [[TMP5:%.*]] = shl i64 [[N]], 2
; IF-EVL-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP5]]
; IF-EVL-NEXT: [[TMP6:%.*]] = shl i64 [[N]], 3
; IF-EVL-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP6]]
; IF-EVL-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
; IF-EVL-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; IF-EVL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x double> @llvm.vp.load.nxv2f64.p0(ptr align 8 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META30:![0-9]+]]
; IF-EVL-NEXT: [[TMP16:%.*]] = fptrunc <vscale x 2 x double> [[VP_OP_LOAD]] to <vscale x 2 x float>
; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2f32.p0(<vscale x 2 x float> [[TMP16]], ptr align 4 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META33:![0-9]+]], !noalias [[META30]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load double, ptr [[GEP]], align 8
; IF-EVL-NEXT: [[CONV:%.*]] = fptrunc double [[TMP21]] to float
; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: store float [[CONV]], ptr [[GEP2]], align 4
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP36:![0-9]+]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
; NO-VP-LABEL: define void @vp_fptrunc(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP7]], 1
; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 16)
; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; NO-VP: [[VECTOR_MEMCHECK]]:
; NO-VP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 2
; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
; NO-VP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 3
; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]]
; NO-VP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
; NO-VP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; NO-VP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; NO-VP-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; NO-VP: [[VECTOR_PH]]:
; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x double>, ptr [[GEP]], align 8, !alias.scope [[META30:![0-9]+]]
; NO-VP-NEXT: [[TMP10:%.*]] = fptrunc <vscale x 2 x double> [[WIDE_LOAD]] to <vscale x 2 x float>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x float> [[TMP10]], ptr [[TMP11]], align 4, !alias.scope [[META33:![0-9]+]], !noalias [[META30]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; NO-VP: [[SCALAR_PH]]:
; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[IV1]]
; NO-VP-NEXT: [[TMP0:%.*]] = load double, ptr [[GEP1]], align 8
; NO-VP-NEXT: [[CONV:%.*]] = fptrunc double [[TMP0]] to float
; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store float [[CONV]], ptr [[GEP2]], align 4
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP36:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%gep = getelementptr inbounds double, ptr %b, i64 %iv
%0 = load double, ptr %gep, align 8
%conv = fptrunc double %0 to float
%gep2 = getelementptr inbounds float, ptr %a, i64 %iv
store float %conv, ptr %gep2, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %N
br i1 %exitcond.not, label %exit, label %loop
exit:
ret void
}
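
; sitofp i32 -> float. Source and destination elements have the same
; width, so the runtime memcheck reduces to a pointer-difference check
; and the chosen VF is vscale x 4 rather than vscale x 2.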
define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_sitofp(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; IF-EVL: [[VECTOR_MEMCHECK]]:
; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; IF-EVL-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4
; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[A1]], [[B2]]
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP18:%.*]] = sitofp <vscale x 4 x i32> [[VP_OP_LOAD]] to <vscale x 4 x float>
; IF-EVL-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP23]] to float
; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: store float [[CONV]], ptr [[GEP2]], align 4
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP38:![0-9]+]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
; NO-VP-LABEL: define void @vp_sitofp(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP9]], 2
; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 16)
; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; NO-VP: [[VECTOR_MEMCHECK]]:
; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; NO-VP: [[VECTOR_PH]]:
; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[GEP]], align 4
; NO-VP-NEXT: [[TMP12:%.*]] = sitofp <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x float>
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; NO-VP: [[SCALAR_PH]]:
; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store float [[CONV]], ptr [[GEP2]], align 4
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP38:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%gep = getelementptr inbounds i32, ptr %b, i64 %iv
%0 = load i32, ptr %gep, align 4
%conv = sitofp i32 %0 to float
%gep2 = getelementptr inbounds float, ptr %a, i64 %iv
store float %conv, ptr %gep2, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %N
br i1 %exitcond.not, label %exit, label %loop
exit:
ret void
}
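
; uitofp i32 -> float.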
define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_uitofp(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; IF-EVL: [[VECTOR_MEMCHECK]]:
; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; IF-EVL-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4
; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[A1]], [[B2]]
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP18:%.*]] = uitofp <vscale x 4 x i32> [[VP_OP_LOAD]] to <vscale x 4 x float>
; IF-EVL-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV:%.*]] = uitofp i32 [[TMP23]] to float
; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: store float [[CONV]], ptr [[GEP2]], align 4
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP40:![0-9]+]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
; NO-VP-LABEL: define void @vp_uitofp(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP9]], 2
; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 16)
; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; NO-VP: [[VECTOR_MEMCHECK]]:
; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; NO-VP: [[VECTOR_PH]]:
; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[GEP]], align 4
; NO-VP-NEXT: [[TMP12:%.*]] = uitofp <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x float>
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; NO-VP: [[SCALAR_PH]]:
; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV:%.*]] = uitofp i32 [[TMP0]] to float
; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store float [[CONV]], ptr [[GEP2]], align 4
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP40:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%gep = getelementptr inbounds i32, ptr %b, i64 %iv
%0 = load i32, ptr %gep, align 4
%conv = uitofp i32 %0 to float
%gep2 = getelementptr inbounds float, ptr %a, i64 %iv
store float %conv, ptr %gep2, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %N
br i1 %exitcond.not, label %exit, label %loop
exit:
ret void
}
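
; fptosi float -> i32.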
define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_fptosi(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; IF-EVL: [[VECTOR_MEMCHECK]]:
; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; IF-EVL-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4
; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[A1]], [[B2]]
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP18:%.*]] = fptosi <vscale x 4 x float> [[VP_OP_LOAD]] to <vscale x 4 x i32>
; IF-EVL-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP23:%.*]] = load float, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV:%.*]] = fptosi float [[TMP23]] to i32
; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: store i32 [[CONV]], ptr [[GEP2]], align 4
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP42:![0-9]+]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
; NO-VP-LABEL: define void @vp_fptosi(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP9]], 2
; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 16)
; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; NO-VP: [[VECTOR_MEMCHECK]]:
; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; NO-VP: [[VECTOR_PH]]:
; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[GEP]], align 4
; NO-VP-NEXT: [[TMP12:%.*]] = fptosi <vscale x 4 x float> [[WIDE_LOAD]] to <vscale x 4 x i32>
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; NO-VP: [[SCALAR_PH]]:
; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]]
; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV:%.*]] = fptosi float [[TMP0]] to i32
; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store i32 [[CONV]], ptr [[GEP2]], align 4
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP42:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%gep = getelementptr inbounds float, ptr %b, i64 %iv
%0 = load float, ptr %gep, align 4
%conv = fptosi float %0 to i32
%gep2 = getelementptr inbounds i32, ptr %a, i64 %iv
store i32 %conv, ptr %gep2, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %N
br i1 %exitcond.not, label %exit, label %loop
exit:
ret void
}
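
; fptoui float -> i32.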
define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_fptoui(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; IF-EVL: [[VECTOR_MEMCHECK]]:
; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; IF-EVL-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4
; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[A1]], [[B2]]
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP18:%.*]] = fptoui <vscale x 4 x float> [[VP_OP_LOAD]] to <vscale x 4 x i32>
; IF-EVL-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP23:%.*]] = load float, ptr [[GEP]], align 4
; IF-EVL-NEXT: [[CONV:%.*]] = fptoui float [[TMP23]] to i32
; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: store i32 [[CONV]], ptr [[GEP2]], align 4
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP44:![0-9]+]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
; NO-VP-LABEL: define void @vp_fptoui(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP9]], 2
; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 16)
; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; NO-VP: [[VECTOR_MEMCHECK]]:
; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; NO-VP: [[VECTOR_PH]]:
; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[GEP]], align 4
; NO-VP-NEXT: [[TMP12:%.*]] = fptoui <vscale x 4 x float> [[WIDE_LOAD]] to <vscale x 4 x i32>
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; NO-VP: [[SCALAR_PH]]:
; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]]
; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV:%.*]] = fptoui float [[TMP0]] to i32
; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store i32 [[CONV]], ptr [[GEP2]], align 4
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP44:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%gep = getelementptr inbounds float, ptr %b, i64 %iv
%0 = load float, ptr %gep, align 4
%conv = fptoui float %0 to i32
%gep2 = getelementptr inbounds i32, ptr %a, i64 %iv
store i32 %conv, ptr %gep2, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %N
br i1 %exitcond.not, label %exit, label %loop
exit:
ret void
}
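; Widening of inttoptr (i64 -> ptr). Both configurations widen the cast to
; <vscale x 2 x ptr>; the runtime pointer-difference check scales vscale x 2
; elements by the 8-byte pointer size.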
define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_inttoptr(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; IF-EVL-NEXT: br label %[[VECTOR_MEMCHECK:.*]]
; IF-EVL: [[VECTOR_MEMCHECK]]:
; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; IF-EVL-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 8
; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[A1]], [[B2]]
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP16]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP18:%.*]] = inttoptr <vscale x 2 x i64> [[VP_OP_LOAD]] to <vscale x 2 x ptr>
; IF-EVL-NEXT: [[TMP19:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2p0.p0(<vscale x 2 x ptr> [[TMP18]], ptr align 8 [[TMP19]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP23:%.*]] = load i64, ptr [[GEP]], align 8
; IF-EVL-NEXT: [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: store ptr [[TMP24]], ptr [[GEP2]], align 8
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP46:![0-9]+]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
; NO-VP-LABEL: define void @vp_inttoptr(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; NO-VP-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP10]], 1
; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP9]], i64 16)
; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; NO-VP: [[VECTOR_MEMCHECK]]:
; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8
; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; NO-VP: [[VECTOR_PH]]:
; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[GEP]], align 8
; NO-VP-NEXT: [[TMP12:%.*]] = inttoptr <vscale x 2 x i64> [[WIDE_LOAD]] to <vscale x 2 x ptr>
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x ptr> [[TMP12]], ptr [[TMP13]], align 8
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; NO-VP: [[SCALAR_PH]]:
; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV1]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[GEP1]], align 8
; NO-VP-NEXT: [[TMP1:%.*]] = inttoptr i64 [[TMP0]] to ptr
; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store ptr [[TMP1]], ptr [[GEP2]], align 8
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP46:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%gep = getelementptr inbounds i64, ptr %b, i64 %iv
%0 = load i64, ptr %gep, align 8
%1 = inttoptr i64 %0 to ptr
%gep2 = getelementptr inbounds ptr, ptr %a, i64 %iv
store ptr %1, ptr %gep2, align 8
%iv.next = add nuw nsw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %N
br i1 %exitcond.not, label %exit, label %loop
exit:
ret void
}
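; Widening of ptrtoint applied to GEP results. The address computation is kept
; as a vector GEP fed by a stepvector-based widened induction, and since the
; loop's only memory access is the i64 store to %a, no runtime alias check is
; generated.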
define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-LABEL: define void @vp_ptrtoint(
; IF-EVL-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; IF-EVL-NEXT: [[ENTRY:.*:]]
; IF-EVL-NEXT: br label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
; IF-EVL-NEXT: [[TMP9:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul <vscale x 2 x i64> [[TMP9]], splat (i64 1)
; IF-EVL-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP10]]
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP12]], i64 0
; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B]], <vscale x 2 x i64> [[VEC_IND]]
; IF-EVL-NEXT: [[TMP15:%.*]] = ptrtoint <vscale x 2 x ptr> [[TMP14]] to <vscale x 2 x i64>
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP15]], ptr align 8 [[TMP16]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH:.*]]:
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[GEP]] to i64
; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: store i64 [[TMP0]], ptr [[GEP2]], align 8
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
; NO-VP-LABEL: define void @vp_ptrtoint(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP13]], 1
; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; NO-VP: [[VECTOR_PH]]:
; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; NO-VP-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; NO-VP-NEXT: [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1)
; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]]
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP3]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; NO-VP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[B]], <vscale x 2 x i64> [[VEC_IND]]
; NO-VP-NEXT: [[TMP10:%.*]] = ptrtoint <vscale x 2 x ptr> [[TMP9]] to <vscale x 2 x i64>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; NO-VP: [[SCALAR_PH]]:
; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[GEP]] to i64
; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store i64 [[TMP0]], ptr [[GEP2]], align 8
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP48:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%gep = getelementptr inbounds i32, ptr %b, i64 %iv
%0 = ptrtoint ptr %gep to i64
%gep2 = getelementptr inbounds i64, ptr %a, i64 %iv
store i64 %0, ptr %gep2, align 8
%iv.next = add nuw nsw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %N
br i1 %exitcond.not, label %exit, label %loop
exit:
ret void
}
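; Loop metadata checks: vectorized loops carry llvm.loop.isvectorized plus
; llvm.loop.unroll.runtime.disable; scalar remainder loops carry
; llvm.loop.isvectorized (the NO-VP remainder of @vp_ptrtoint carries both).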
;.
; IF-EVL: [[META0]] = !{[[META1:![0-9]+]]}
; IF-EVL: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]}
; IF-EVL: [[META2]] = distinct !{[[META2]], !"LVerDomain"}
; IF-EVL: [[META3]] = !{[[META4:![0-9]+]]}
; IF-EVL: [[META4]] = distinct !{[[META4]], [[META2]]}
; IF-EVL: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]]}
; IF-EVL: [[META6]] = !{!"llvm.loop.isvectorized", i32 1}
; IF-EVL: [[META7]] = !{!"llvm.loop.unroll.runtime.disable"}
; IF-EVL: [[LOOP8]] = distinct !{[[LOOP8]], [[META6]]}
; IF-EVL: [[META9]] = !{[[META10:![0-9]+]]}
; IF-EVL: [[META10]] = distinct !{[[META10]], [[META11:![0-9]+]]}
; IF-EVL: [[META11]] = distinct !{[[META11]], !"LVerDomain"}
; IF-EVL: [[META12]] = !{[[META13:![0-9]+]]}
; IF-EVL: [[META13]] = distinct !{[[META13]], [[META11]]}
; IF-EVL: [[LOOP14]] = distinct !{[[LOOP14]], [[META6]], [[META7]]}
; IF-EVL: [[LOOP15]] = distinct !{[[LOOP15]], [[META6]]}
; IF-EVL: [[META16]] = !{[[META17:![0-9]+]]}
; IF-EVL: [[META17]] = distinct !{[[META17]], [[META18:![0-9]+]]}
; IF-EVL: [[META18]] = distinct !{[[META18]], !"LVerDomain"}
; IF-EVL: [[META19]] = !{[[META20:![0-9]+]]}
; IF-EVL: [[META20]] = distinct !{[[META20]], [[META18]]}
; IF-EVL: [[LOOP21]] = distinct !{[[LOOP21]], [[META6]], [[META7]]}
; IF-EVL: [[LOOP22]] = distinct !{[[LOOP22]], [[META6]]}
; IF-EVL: [[META23]] = !{[[META24:![0-9]+]]}
; IF-EVL: [[META24]] = distinct !{[[META24]], [[META25:![0-9]+]]}
; IF-EVL: [[META25]] = distinct !{[[META25]], !"LVerDomain"}
; IF-EVL: [[META26]] = !{[[META27:![0-9]+]]}
; IF-EVL: [[META27]] = distinct !{[[META27]], [[META25]]}
; IF-EVL: [[LOOP28]] = distinct !{[[LOOP28]], [[META6]], [[META7]]}
; IF-EVL: [[LOOP29]] = distinct !{[[LOOP29]], [[META6]]}
; IF-EVL: [[META30]] = !{[[META31:![0-9]+]]}
; IF-EVL: [[META31]] = distinct !{[[META31]], [[META32:![0-9]+]]}
; IF-EVL: [[META32]] = distinct !{[[META32]], !"LVerDomain"}
; IF-EVL: [[META33]] = !{[[META34:![0-9]+]]}
; IF-EVL: [[META34]] = distinct !{[[META34]], [[META32]]}
; IF-EVL: [[LOOP35]] = distinct !{[[LOOP35]], [[META6]], [[META7]]}
; IF-EVL: [[LOOP36]] = distinct !{[[LOOP36]], [[META6]]}
; IF-EVL: [[LOOP37]] = distinct !{[[LOOP37]], [[META6]], [[META7]]}
; IF-EVL: [[LOOP38]] = distinct !{[[LOOP38]], [[META6]]}
; IF-EVL: [[LOOP39]] = distinct !{[[LOOP39]], [[META6]], [[META7]]}
; IF-EVL: [[LOOP40]] = distinct !{[[LOOP40]], [[META6]]}
; IF-EVL: [[LOOP41]] = distinct !{[[LOOP41]], [[META6]], [[META7]]}
; IF-EVL: [[LOOP42]] = distinct !{[[LOOP42]], [[META6]]}
; IF-EVL: [[LOOP43]] = distinct !{[[LOOP43]], [[META6]], [[META7]]}
; IF-EVL: [[LOOP44]] = distinct !{[[LOOP44]], [[META6]]}
; IF-EVL: [[LOOP45]] = distinct !{[[LOOP45]], [[META6]], [[META7]]}
; IF-EVL: [[LOOP46]] = distinct !{[[LOOP46]], [[META6]]}
; IF-EVL: [[LOOP47]] = distinct !{[[LOOP47]], [[META6]], [[META7]]}
;.
; NO-VP: [[META0]] = !{[[META1:![0-9]+]]}
; NO-VP: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]}
; NO-VP: [[META2]] = distinct !{[[META2]], !"LVerDomain"}
; NO-VP: [[META3]] = !{[[META4:![0-9]+]]}
; NO-VP: [[META4]] = distinct !{[[META4]], [[META2]]}
; NO-VP: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]]}
; NO-VP: [[META6]] = !{!"llvm.loop.isvectorized", i32 1}
; NO-VP: [[META7]] = !{!"llvm.loop.unroll.runtime.disable"}
; NO-VP: [[LOOP8]] = distinct !{[[LOOP8]], [[META6]]}
; NO-VP: [[META9]] = !{[[META10:![0-9]+]]}
; NO-VP: [[META10]] = distinct !{[[META10]], [[META11:![0-9]+]]}
; NO-VP: [[META11]] = distinct !{[[META11]], !"LVerDomain"}
; NO-VP: [[META12]] = !{[[META13:![0-9]+]]}
; NO-VP: [[META13]] = distinct !{[[META13]], [[META11]]}
; NO-VP: [[LOOP14]] = distinct !{[[LOOP14]], [[META6]], [[META7]]}
; NO-VP: [[LOOP15]] = distinct !{[[LOOP15]], [[META6]]}
; NO-VP: [[META16]] = !{[[META17:![0-9]+]]}
; NO-VP: [[META17]] = distinct !{[[META17]], [[META18:![0-9]+]]}
; NO-VP: [[META18]] = distinct !{[[META18]], !"LVerDomain"}
; NO-VP: [[META19]] = !{[[META20:![0-9]+]]}
; NO-VP: [[META20]] = distinct !{[[META20]], [[META18]]}
; NO-VP: [[LOOP21]] = distinct !{[[LOOP21]], [[META6]], [[META7]]}
; NO-VP: [[LOOP22]] = distinct !{[[LOOP22]], [[META6]]}
; NO-VP: [[META23]] = !{[[META24:![0-9]+]]}
; NO-VP: [[META24]] = distinct !{[[META24]], [[META25:![0-9]+]]}
; NO-VP: [[META25]] = distinct !{[[META25]], !"LVerDomain"}
; NO-VP: [[META26]] = !{[[META27:![0-9]+]]}
; NO-VP: [[META27]] = distinct !{[[META27]], [[META25]]}
; NO-VP: [[LOOP28]] = distinct !{[[LOOP28]], [[META6]], [[META7]]}
; NO-VP: [[LOOP29]] = distinct !{[[LOOP29]], [[META6]]}
; NO-VP: [[META30]] = !{[[META31:![0-9]+]]}
; NO-VP: [[META31]] = distinct !{[[META31]], [[META32:![0-9]+]]}
; NO-VP: [[META32]] = distinct !{[[META32]], !"LVerDomain"}
; NO-VP: [[META33]] = !{[[META34:![0-9]+]]}
; NO-VP: [[META34]] = distinct !{[[META34]], [[META32]]}
; NO-VP: [[LOOP35]] = distinct !{[[LOOP35]], [[META6]], [[META7]]}
; NO-VP: [[LOOP36]] = distinct !{[[LOOP36]], [[META6]]}
; NO-VP: [[LOOP37]] = distinct !{[[LOOP37]], [[META6]], [[META7]]}
; NO-VP: [[LOOP38]] = distinct !{[[LOOP38]], [[META6]]}
; NO-VP: [[LOOP39]] = distinct !{[[LOOP39]], [[META6]], [[META7]]}
; NO-VP: [[LOOP40]] = distinct !{[[LOOP40]], [[META6]]}
; NO-VP: [[LOOP41]] = distinct !{[[LOOP41]], [[META6]], [[META7]]}
; NO-VP: [[LOOP42]] = distinct !{[[LOOP42]], [[META6]]}
; NO-VP: [[LOOP43]] = distinct !{[[LOOP43]], [[META6]], [[META7]]}
; NO-VP: [[LOOP44]] = distinct !{[[LOOP44]], [[META6]]}
; NO-VP: [[LOOP45]] = distinct !{[[LOOP45]], [[META6]], [[META7]]}
; NO-VP: [[LOOP46]] = distinct !{[[LOOP46]], [[META6]]}
; NO-VP: [[LOOP47]] = distinct !{[[LOOP47]], [[META6]], [[META7]]}
; NO-VP: [[LOOP48]] = distinct !{[[LOOP48]], [[META7]], [[META6]]}
;.