| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 |
| ; RUN: opt -passes=loop-vectorize -force-vector-width=4 -enable-vplan-native-path -S %s | FileCheck %s |
| |
; Test that the VPlan native path is able to widen call instructions like
; llvm.sqrt.* intrinsic calls.
| |
| declare double @llvm.sqrt.f64(double %0) |
define void @widen_call_instruction(ptr noalias nocapture readonly %a.in, ptr noalias nocapture readonly %b.in, ptr noalias nocapture %c.out) {
; CHECK-LABEL: define void @widen_call_instruction(
; CHECK-SAME: ptr noalias readonly captures(none) [[A_IN:%.*]], ptr noalias readonly captures(none) [[B_IN:%.*]], ptr noalias captures(none) [[C_OUT:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_LATCH:.*]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_LATCH]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds double, ptr [[A_IN]], <4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> [[TMP0]], i32 8, <4 x i1> splat (i1 true), <4 x double> poison)
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds double, ptr [[B_IN]], <4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> [[TMP1]], i32 8, <4 x i1> splat (i1 true), <4 x double> poison)
; CHECK-NEXT: [[TMP2:%.*]] = call <4 x double> @llvm.sqrt.v4f64(<4 x double> [[WIDE_MASKED_GATHER1]])
; CHECK-NEXT: br label %[[FOR2_HEADER2:.*]]
; CHECK: [[FOR2_HEADER2]]:
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_BODY]] ], [ [[TMP4:%.*]], %[[FOR2_HEADER2]] ]
; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x double> [ [[WIDE_MASKED_GATHER]], %[[VECTOR_BODY]] ], [ [[TMP3:%.*]], %[[FOR2_HEADER2]] ]
; CHECK-NEXT: [[TMP3]] = fadd <4 x double> [[TMP2]], [[VEC_PHI3]]
; CHECK-NEXT: [[TMP4]] = add nuw nsw <4 x i32> [[VEC_PHI]], splat (i32 1)
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i32> [[TMP4]], splat (i32 10000)
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP5]], i32 0
; CHECK-NEXT: br i1 [[TMP6]], label %[[VECTOR_LATCH]], label %[[FOR2_HEADER2]]
; CHECK: [[VECTOR_LATCH]]:
; CHECK-NEXT: [[VEC_PHI4:%.*]] = phi <4 x double> [ [[TMP3]], %[[FOR2_HEADER2]] ]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[C_OUT]], <4 x i64> [[VEC_IND]]
; CHECK-NEXT: call void @llvm.masked.scatter.v4f64.v4p0(<4 x double> [[VEC_PHI4]], <4 x ptr> [[TMP7]], i32 8, <4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR1_HEADER:.*]]
; CHECK: [[FOR1_HEADER]]:
; CHECK-NEXT: [[INDVAR1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVAR11:%.*]], %[[FOR1_LATCH:.*]] ]
; CHECK-NEXT: [[A_PTR:%.*]] = getelementptr inbounds double, ptr [[A_IN]], i64 [[INDVAR1]]
; CHECK-NEXT: [[A:%.*]] = load double, ptr [[A_PTR]], align 8
; CHECK-NEXT: [[B_PTR:%.*]] = getelementptr inbounds double, ptr [[B_IN]], i64 [[INDVAR1]]
; CHECK-NEXT: [[B:%.*]] = load double, ptr [[B_PTR]], align 8
; CHECK-NEXT: [[B_SQRT:%.*]] = call double @llvm.sqrt.f64(double [[B]])
; CHECK-NEXT: br label %[[FOR2_HEADER:.*]]
; CHECK: [[FOR2_HEADER]]:
; CHECK-NEXT: [[INDVAR2:%.*]] = phi i32 [ 0, %[[FOR1_HEADER]] ], [ [[INDVAR21:%.*]], %[[FOR2_HEADER]] ]
; CHECK-NEXT: [[A_REDUCTION:%.*]] = phi double [ [[A]], %[[FOR1_HEADER]] ], [ [[A_REDUCTION1:%.*]], %[[FOR2_HEADER]] ]
; CHECK-NEXT: [[A_REDUCTION1]] = fadd double [[B_SQRT]], [[A_REDUCTION]]
; CHECK-NEXT: [[INDVAR21]] = add nuw nsw i32 [[INDVAR2]], 1
; CHECK-NEXT: [[FOR2_COND:%.*]] = icmp eq i32 [[INDVAR21]], 10000
; CHECK-NEXT: br i1 [[FOR2_COND]], label %[[FOR1_LATCH]], label %[[FOR2_HEADER]]
; CHECK: [[FOR1_LATCH]]:
; CHECK-NEXT: [[A_REDUCTION1_LCSSA:%.*]] = phi double [ [[A_REDUCTION1]], %[[FOR2_HEADER]] ]
; CHECK-NEXT: [[C_PTR:%.*]] = getelementptr inbounds double, ptr [[C_OUT]], i64 [[INDVAR1]]
; CHECK-NEXT: store double [[A_REDUCTION1_LCSSA]], ptr [[C_PTR]], align 8
; CHECK-NEXT: [[INDVAR11]] = add nuw nsw i64 [[INDVAR1]], 1
; CHECK-NEXT: [[FOR1_COND:%.*]] = icmp eq i64 [[INDVAR11]], 1000
; CHECK-NEXT: br i1 [[FOR1_COND]], label %[[EXIT]], label %[[FOR1_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
br label %for1.header

; Outer loop header (trip count 1000): loads a[i] and b[i], and computes
; sqrt(b[i]) via the llvm.sqrt.f64 intrinsic — the call the vectorizer is
; expected to widen to llvm.sqrt.v4f64 (see CHECK lines above).
for1.header:
%indvar1 = phi i64 [ 0, %entry ], [ %indvar11, %for1.latch ]
%a.ptr = getelementptr inbounds double, ptr %a.in, i64 %indvar1
%a = load double, ptr %a.ptr, align 8
%b.ptr = getelementptr inbounds double, ptr %b.in, i64 %indvar1
%b = load double, ptr %b.ptr, align 8
%b.sqrt = call double @llvm.sqrt.f64(double %b)
br label %for2.header

; Inner loop (trip count 10000): accumulates sqrt(b[i]) into a running sum
; seeded with a[i].
for2.header:
%indvar2 = phi i32 [ 0, %for1.header ], [ %indvar21, %for2.header ]
%a.reduction = phi double [ %a, %for1.header ], [ %a.reduction1, %for2.header ]
%a.reduction1 = fadd double %b.sqrt, %a.reduction
%indvar21 = add nuw nsw i32 %indvar2, 1
%for2.cond = icmp eq i32 %indvar21, 10000
br i1 %for2.cond, label %for1.latch, label %for2.header

; Outer loop latch: stores the inner-loop reduction result to c[i] and
; advances the outer induction variable. Outer-loop vectorization is
; requested via !0 (llvm.loop.vectorize.enable).
for1.latch:
%c.ptr = getelementptr inbounds double, ptr %c.out, i64 %indvar1
store double %a.reduction1, ptr %c.ptr, align 8
%indvar11 = add nuw nsw i64 %indvar1, 1
%for1.cond = icmp eq i64 %indvar11, 1000
br i1 %for1.cond, label %exit, label %for1.header, !llvm.loop !0

exit:
ret void
}
| |
| ; Check we do not try to widen non-intrinsic calls, |
| ; https://github.com/llvm/llvm-project/issues/131071. |
define void @call_to_non_intrinsic() {
; CHECK-LABEL: define void @call_to_non_intrinsic() {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br label %[[OUTER_HEADER:.*]]
; CHECK: [[OUTER_HEADER]]:
; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[OUTER_IV_NEXT:%.*]], %[[OUTER_LATCH:.*]] ]
; CHECK-NEXT: br label %[[INNER_HEADER:.*]]
; CHECK: [[INNER_HEADER]]:
; CHECK-NEXT: [[INNER_IV:%.*]] = phi i64 [ 0, %[[OUTER_HEADER]] ], [ [[INNER_IV_NEXT:%.*]], %[[INNER_HEADER]] ]
; CHECK-NEXT: call void @use()
; CHECK-NEXT: [[INNER_IV_NEXT]] = add i64 [[INNER_IV]], 1
; CHECK-NEXT: [[INNER_EC:%.*]] = icmp eq i64 [[INNER_IV_NEXT]], 100
; CHECK-NEXT: br i1 [[INNER_EC]], label %[[OUTER_LATCH]], label %[[INNER_HEADER]]
; CHECK: [[OUTER_LATCH]]:
; CHECK-NEXT: [[OUTER_IV_NEXT]] = add i64 [[OUTER_IV]], 1
; CHECK-NEXT: [[OUTER_EC:%.*]] = icmp eq i64 [[OUTER_IV_NEXT]], 100
; CHECK-NEXT: br i1 [[OUTER_EC]], label %[[EXIT:.*]], label %[[OUTER_HEADER]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
br label %outer.header

; Outer loop (trip count 100): vectorization is requested via !0, but the
; CHECK lines above verify the loop nest is left scalar because of the
; non-intrinsic call to @use() in the inner loop.
outer.header:
%outer.iv = phi i64 [ 0, %entry ], [ %outer.iv.next, %outer.latch ]
br label %inner.header

; Inner loop (trip count 100): contains a call to the external, non-intrinsic
; function @use(), which the VPlan native path must not try to widen.
inner.header:
%inner.iv = phi i64 [ 0, %outer.header ], [ %inner.iv.next, %inner.header ]
call void @use()
%inner.iv.next = add i64 %inner.iv, 1
%inner.ec = icmp eq i64 %inner.iv.next, 100
br i1 %inner.ec, label %outer.latch, label %inner.header

outer.latch:
%outer.iv.next = add i64 %outer.iv, 1
%outer.ec = icmp eq i64 %outer.iv.next, 100
br i1 %outer.ec, label %exit, label %outer.header, !llvm.loop !0

exit:
ret void
}
| |
| declare void @use() |
| |
| !0 = distinct !{!0, !1} |
| !1 = !{!"llvm.loop.vectorize.enable", i1 true} |
| ;. |
| ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} |
| ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} |
| ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} |
| ; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]} |
| ; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META5:![0-9]+]]} |
| ; CHECK: [[META5]] = !{!"llvm.loop.vectorize.enable", i1 true} |
| ;. |