; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
; RUN: opt -p loop-vectorize -mtriple=aarch64 -mattr=+sve -S %s | FileCheck %s
; The innermost block then.1 only has a 25% chance of being executed according
; to BranchProbabilityInfo, but if we vectorize the loop then its (masked)
; instructions are executed on every iteration. Avoid this unprofitable
; vectorization by taking the nested branch probability into account in the
; cost model.
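; (With no profile data, BranchProbabilityInfo treats each conditional branch
; as roughly 50/50, so then.1 is reached with probability ~0.5 * 0.5 = 0.25.)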
define void @nested(ptr noalias %p0, ptr noalias %p1, i1 %c0, i1 %c1) {
; CHECK-LABEL: define void @nested(
; CHECK-SAME: ptr noalias [[P0:%.*]], ptr noalias [[P1:%.*]], i1 [[C0:%.*]], i1 [[C1:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[X:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
; CHECK-NEXT: br i1 [[C0]], label %[[THEN_0:.*]], label %[[LATCH]]
; CHECK: [[THEN_0]]:
; CHECK-NEXT: br i1 [[C1]], label %[[THEN_1:.*]], label %[[LATCH]]
; CHECK: [[THEN_1]]:
; CHECK-NEXT: [[GEP0:%.*]] = getelementptr i64, ptr [[P0]], i32 [[X]]
; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[GEP0]], align 8
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i64, ptr [[P1]], i32 [[X]]
; CHECK-NEXT: [[Y:%.*]] = load i64, ptr [[GEP1]], align 8
; CHECK-NEXT: [[Z:%.*]] = udiv i64 [[X1]], [[Y]]
; CHECK-NEXT: store i64 [[Z]], ptr [[GEP1]], align 8
; CHECK-NEXT: br label %[[LATCH]]
; CHECK: [[LATCH]]:
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[X]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i32 [[IV_NEXT]], 1024
; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
br label %loop
loop:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ]
br i1 %c0, label %then.0, label %latch
then.0:
br i1 %c1, label %then.1, label %latch
then.1:
%gep0 = getelementptr i64, ptr %p0, i32 %iv
%x = load i64, ptr %gep0
%gep1 = getelementptr i64, ptr %p1, i32 %iv
%y = load i64, ptr %gep1
%z = udiv i64 %x, %y
store i64 %z, ptr %gep1
br label %latch
latch:
%iv.next = add i32 %iv, 1
%done = icmp eq i32 %iv.next, 1024
br i1 %done, label %exit, label %loop
exit:
ret void
}
; This is the same CFG as @nested above, but branch weights have been provided
; which tell BranchProbabilityInfo that then.1 is always reached. In this case
; we should vectorize, because doing so is profitable.
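; (Both conditional branches carry !prof !4, defined at the end of the file,
; which gives weight 1 to the taken edge and weight 0 to the untaken edge, so
; the guarded blocks are expected to execute on essentially every iteration.)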
define void @always_taken(ptr noalias %p0, ptr noalias %p1, i1 %c0, i1 %c1) {
; CHECK-LABEL: define void @always_taken(
; CHECK-SAME: ptr noalias [[P0:%.*]], ptr noalias [[P1:%.*]], i1 [[C0:%.*]], i1 [[C1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 1024, [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP4]], 1
; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i32 [[TMP3]], 1
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP5]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[C1]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i1> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[C0]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i1> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = select <vscale x 2 x i1> [[BROADCAST_SPLAT2]], <vscale x 2 x i1> [[BROADCAST_SPLAT]], <vscale x 2 x i1> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P0]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP3]] to i64
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i64, ptr [[TMP10]], i64 [[TMP7]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 8 [[TMP10]], <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> poison)
; CHECK-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 8 [[TMP20]], <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> poison)
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P1]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i64, ptr [[TMP9]], i64 [[TMP7]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 8 [[TMP9]], <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> poison)
; CHECK-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 8 [[TMP12]], <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> poison)
; CHECK-NEXT: [[TMP21:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[WIDE_MASKED_LOAD4]], <vscale x 2 x i64> splat (i64 1)
; CHECK-NEXT: [[TMP14:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[WIDE_MASKED_LOAD5]], <vscale x 2 x i64> splat (i64 1)
; CHECK-NEXT: [[TMP15:%.*]] = udiv <vscale x 2 x i64> [[WIDE_MASKED_LOAD]], [[TMP21]]
; CHECK-NEXT: [[TMP22:%.*]] = udiv <vscale x 2 x i64> [[WIDE_MASKED_LOAD3]], [[TMP14]]
; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP15]], ptr align 8 [[TMP9]], <vscale x 2 x i1> [[TMP6]])
; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP22]], ptr align 8 [[TMP12]], <vscale x 2 x i1> [[TMP6]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 1024, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV1:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[LATCH:.*]] ]
; CHECK-NEXT: br i1 [[C0]], label %[[THEN_0:.*]], label %[[LATCH]], !prof [[PROF3:![0-9]+]]
; CHECK: [[THEN_0]]:
; CHECK-NEXT: br i1 [[C1]], label %[[THEN_1:.*]], label %[[LATCH]], !prof [[PROF3]]
; CHECK: [[THEN_1]]:
; CHECK-NEXT: [[GEP0:%.*]] = getelementptr i64, ptr [[P0]], i32 [[IV1]]
; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP0]], align 8
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i64, ptr [[P1]], i32 [[IV1]]
; CHECK-NEXT: [[Y:%.*]] = load i64, ptr [[GEP1]], align 8
; CHECK-NEXT: [[Z:%.*]] = udiv i64 [[X]], [[Y]]
; CHECK-NEXT: store i64 [[Z]], ptr [[GEP1]], align 8
; CHECK-NEXT: br label %[[LATCH]]
; CHECK: [[LATCH]]:
; CHECK-NEXT: [[IV_NEXT1]] = add i32 [[IV1]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i32 [[IV_NEXT1]], 1024
; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
br label %loop
loop:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ]
br i1 %c0, label %then.0, label %latch, !prof !4
then.0:
br i1 %c1, label %then.1, label %latch, !prof !4
then.1:
%gep0 = getelementptr i64, ptr %p0, i32 %iv
%x = load i64, ptr %gep0
%gep1 = getelementptr i64, ptr %p1, i32 %iv
%y = load i64, ptr %gep1
%z = udiv i64 %x, %y
store i64 %z, ptr %gep1
br label %latch
latch:
%iv.next = add i32 %iv, 1
%done = icmp eq i32 %iv.next, 1024
br i1 %done, label %exit, label %loop
exit:
ret void
}
!4 = !{!"branch_weights", i32 1, i32 0}