| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph:" --version 5 |
| ; RUN: opt < %s -passes="loop-vectorize" -force-vector-interleave=1 -force-vector-width=4 -S | FileCheck %s |
| |
| target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" |
| |
| ; This test checks that we can vectorize loop with reduction variable |
| ; stored in an invariant address. |
| ; |
| ; int sum = 0; |
| ; for(i=0..N) { |
| ; sum += src[i]; |
| ; dst[42] = sum; |
| ; } |
define void @reduc_store(ptr %dst, ptr readonly %src) {
; CHECK-LABEL: define void @reduc_store(
; CHECK-SAME: ptr [[DST:%.*]], ptr readonly [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 42
; CHECK-NEXT: store i32 0, ptr [[GEP_DST]], align 4
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 172
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SRC]], i64 4000
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[GEP_DST]], [[SCEVGEP1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP1:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4, !alias.scope [[META0:![0-9]+]]
; CHECK-NEXT: [[TMP1]] = add <4 x i32> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP1]])
; CHECK-NEXT: store i32 [[TMP3]], ptr [[GEP_DST]], align 4, !alias.scope [[META6:![0-9]+]], !noalias [[META0]]
; CHECK-NEXT: br [[EXIT:label %.*]]
; CHECK: [[SCALAR_PH]]:
;
; Positive case: %add is a plain integer add reduction whose running value is
; stored to the loop-invariant address %gep.dst each iteration. The
; autogenerated CHECK lines above show the loop is vectorized (with runtime
; alias checks between %gep.dst and %src) and the store is sunk into the
; middle block, after the horizontal vector.reduce.add.
entry:
%gep.dst = getelementptr inbounds i32, ptr %dst, i64 42
store i32 0, ptr %gep.dst, align 4
br label %for.body

for.body:
%sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
%gep.src = getelementptr inbounds i32, ptr %src, i64 %iv
%0 = load i32, ptr %gep.src, align 4
%add = add nsw i32 %sum, %0
; Store of the (not yet final) reduction value to an invariant address.
store i32 %add, ptr %gep.dst, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, 1000
br i1 %exitcond, label %exit, label %for.body

exit:
ret void
}
| |
| ; Same as above but with floating point numbers instead. |
| ; |
| ; float sum = 0; |
| ; for(i=0..N) { |
| ; sum += src[i]; |
| ; dst[42] = sum; |
| ; } |
define void @reduc_store_fadd_fast(ptr %dst, ptr readonly %src) {
; CHECK-LABEL: define void @reduc_store_fadd_fast(
; CHECK-SAME: ptr [[DST:%.*]], ptr readonly [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 42
; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST]], align 4
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 172
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SRC]], i64 4000
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[GEP_DST]], [[SCEVGEP1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP1:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[SRC]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4, !alias.scope [[META9:![0-9]+]]
; CHECK-NEXT: [[TMP1]] = fadd fast <4 x float> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP3:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP1]])
; CHECK-NEXT: store float [[TMP3]], ptr [[GEP_DST]], align 4, !alias.scope [[META13:![0-9]+]], !noalias [[META9]]
; CHECK-NEXT: br [[EXIT:label %.*]]
; CHECK: [[SCALAR_PH]]:
;
; Floating-point variant of @reduc_store. The `fast` flag on the fadd permits
; reassociation of the reduction; the CHECK lines above show it is vectorized
; and reduced via vector.reduce.fadd, with the store sunk to the middle block.
entry:
%gep.dst = getelementptr inbounds float, ptr %dst, i64 42
store float 0.000000e+00, ptr %gep.dst, align 4
br label %for.body

for.body:
%sum = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
%gep.src = getelementptr inbounds float, ptr %src, i64 %iv
%0 = load float, ptr %gep.src, align 4
; `fast` is required for LV to treat this fadd as a reassociable reduction.
%add = fadd fast float %sum, %0
store float %add, ptr %gep.dst, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, 1000
br i1 %exitcond, label %exit, label %for.body

exit:
ret void
}
| |
| ; Check that if we have a read from an invariant address, we do not vectorize. |
| ; |
| ; int sum = 0; |
| ; for(i=0..N) { |
| ; sum += src[i]; |
| ; dst.2[i] = dst[42]; |
| ; dst[42] = sum; |
| ; } |
define void @reduc_store_load(ptr %dst, ptr readonly %src, ptr noalias %dst.2) {
; CHECK-LABEL: define void @reduc_store_load(
; CHECK-SAME: ptr [[DST:%.*]], ptr readonly [[SRC:%.*]], ptr noalias [[DST_2:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 42
; CHECK-NEXT: store i32 0, ptr [[GEP_DST]], align 4
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP_SRC]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[SUM]], [[TMP0]]
; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[GEP_DST]], align 4
; CHECK-NEXT: [[GEP_DST_2:%.*]] = getelementptr inbounds i32, ptr [[DST_2]], i64 [[IV]]
; CHECK-NEXT: store i32 [[LV]], ptr [[GEP_DST_2]], align 4
; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP_DST]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT:.*]], label %[[FOR_BODY]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
; Negative case: the loop also LOADS from the invariant address %gep.dst
; (%lv), so the reduction-store transform does not apply. The CHECK lines
; above show the loop stays scalar (no vector.body is emitted).
entry:
%gep.dst = getelementptr inbounds i32, ptr %dst, i64 42
store i32 0, ptr %gep.dst, align 4
br label %for.body

for.body:
%sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
%gep.src = getelementptr inbounds i32, ptr %src, i64 %iv
%0 = load i32, ptr %gep.src, align 4
%add = add nsw i32 %sum, %0
; Read of the invariant address inside the loop is what blocks vectorization.
%lv = load i32, ptr %gep.dst
%gep.dst.2 = getelementptr inbounds i32, ptr %dst.2, i64 %iv
store i32 %lv, ptr %gep.dst.2, align 4
store i32 %add, ptr %gep.dst, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, 1000
br i1 %exitcond, label %exit, label %for.body

exit:
ret void
}
| |
| ; Check that if we have a read from an invariant address, we do not vectorize, |
| ; even if we vectorize with runtime checks. The test below is a variant of |
| ; @reduc_store_load with a non-constant dependence distance, resulting in |
| ; vectorization with runtime checks. |
| ; |
define void @reduc_store_load_with_non_constant_distance_dependence(ptr %dst, ptr noalias %dst.2, i64 %off) {
; CHECK-LABEL: define void @reduc_store_load_with_non_constant_distance_dependence(
; CHECK-SAME: ptr [[DST:%.*]], ptr noalias [[DST_2:%.*]], i64 [[OFF:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 42
; CHECK-NEXT: [[DST_2_OFF:%.*]] = getelementptr inbounds i32, ptr [[DST_2]], i64 [[OFF]]
; CHECK-NEXT: store i32 0, ptr [[GEP_DST]], align 4
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[DST_2]], i64 [[IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP_SRC]], align 4
; CHECK-NEXT: [[IV_OFF:%.*]] = mul i64 [[IV]], 2
; CHECK-NEXT: [[ADD]] = add nsw i32 [[SUM]], [[TMP0]]
; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[GEP_DST]], align 4
; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP_DST]], align 4
; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr inbounds i32, ptr [[DST_2_OFF]], i64 [[IV]]
; CHECK-NEXT: store i32 [[LV]], ptr [[GEP_SRC_2]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT:.*]], label %[[FOR_BODY]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
; Negative case, variant of @reduc_store_load: the runtime offset %off gives
; the %dst.2 accesses a non-constant dependence distance, which would normally
; be handled with runtime checks — but the load from the invariant address
; %gep.dst must still block vectorization. CHECK lines show a scalar loop only.
entry:
%gep.dst = getelementptr inbounds i32, ptr %dst, i64 42
%dst.2.off = getelementptr inbounds i32, ptr %dst.2, i64 %off
store i32 0, ptr %gep.dst, align 4
br label %for.body

for.body:
%sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
%gep.src = getelementptr inbounds i32, ptr %dst.2, i64 %iv
%0 = load i32, ptr %gep.src, align 4
%iv.off = mul i64 %iv, 2
%add = add nsw i32 %sum, %0
; Read of the invariant reduction address — must prevent vectorization even
; when runtime checks would otherwise be emitted.
%lv = load i32, ptr %gep.dst
store i32 %add, ptr %gep.dst, align 4
%gep.src.2 = getelementptr inbounds i32, ptr %dst.2.off, i64 %iv
store i32 %lv, ptr %gep.src.2, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, 1000
br i1 %exitcond, label %exit, label %for.body

exit:
ret void
}
| |
| |
| ; Final value is not guaranteed to be stored in an invariant address. |
| ; We don't vectorize in that case. |
| ; |
| ; int sum = 0; |
| ; for(i=0..N) { |
| ; int diff = y[i] - x[i]; |
| ; if (diff > 0) { |
; sum += diff;
| ; *t = sum; |
| ; } |
| ; } |
define void @reduc_cond_store(ptr %t, ptr readonly %x, ptr readonly %y) {
; CHECK-LABEL: define void @reduc_cond_store(
; CHECK-SAME: ptr [[T:%.*]], ptr readonly [[X:%.*]], ptr readonly [[Y:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: store i32 0, ptr [[T]], align 4
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[SUM_2:%.*]], %[[IF_END:.*]] ]
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[IF_END]] ]
; CHECK-NEXT: [[GEP_Y:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 [[IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP_Y]], align 4
; CHECK-NEXT: [[GEP_X:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IV]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[GEP_X]], align 4
; CHECK-NEXT: [[DIFF:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]]
; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[DIFF]], 0
; CHECK-NEXT: br i1 [[CMP2]], label %[[IF_THEN:.*]], label %[[IF_END]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: [[SUM_1:%.*]] = add nsw i32 [[DIFF]], [[SUM]]
; CHECK-NEXT: store i32 [[SUM_1]], ptr [[T]], align 4
; CHECK-NEXT: br label %[[IF_END]]
; CHECK: [[IF_END]]:
; CHECK-NEXT: [[SUM_2]] = phi i32 [ [[SUM_1]], %[[IF_THEN]] ], [ [[TMP0]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: ret void
;
; Negative case: the store to *%t only happens on the taken branch, so the
; value stored last is not guaranteed to be the final reduction value. The
; CHECK lines above show the loop remains scalar.
entry:
store i32 0, ptr %t, align 4
br label %for.body

for.body:
%sum = phi i32 [ 0, %entry ], [ %sum.2, %if.end ]
%iv = phi i64 [ 0, %entry ], [ %iv.next, %if.end ]
%gep.y = getelementptr inbounds i32, ptr %y, i64 %iv
%0 = load i32, ptr %gep.y, align 4
%gep.x = getelementptr inbounds i32, ptr %x, i64 %iv
%1 = load i32, ptr %gep.x, align 4
%diff = sub nsw i32 %0, %1
%cmp2 = icmp sgt i32 %diff, 0
br i1 %cmp2, label %if.then, label %if.end

if.then:
; Conditional (predicated) store of the running sum to the invariant address.
%sum.1 = add nsw i32 %diff, %sum
store i32 %sum.1, ptr %t, align 4
br label %if.end

if.end:
; NOTE(review): the not-taken-path operand is %0 (the loaded y value), not
; %sum, so this is not a plain conditional-add reduction as the pseudo-code
; above suggests — confirm this divergence from the comment is intentional.
%sum.2 = phi i32 [ %sum.1, %if.then ], [ %0, %for.body ]
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, 1000
br i1 %exitcond, label %for.end, label %for.body

for.end:
ret void
}
| |
| ; Check that we can vectorize code with several stores to an invariant address |
| ; with condition that final reduction value is stored too. |
| ; |
| ; int sum = 0; |
| ; for(int i=0; i < 1000; i+=2) { |
| ; sum += src[i]; |
| ; dst[42] = sum; |
| ; sum += src[i+1]; |
| ; dst[42] = sum; |
| ; } |
define void @reduc_store_inside_unrolled(ptr %dst, ptr readonly %src) {
; CHECK-LABEL: define void @reduc_store_inside_unrolled(
; CHECK-SAME: ptr [[DST:%.*]], ptr readonly [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 42
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 172
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SRC]], i64 4000
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[GEP_DST]], [[SCEVGEP1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 2, i64 4, i64 6>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP34:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 2
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 6
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP4]], align 4, !alias.scope [[META16:![0-9]+]]
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !alias.scope [[META16]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP6]], align 4, !alias.scope [[META16]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP7]], align 4, !alias.scope [[META16]]
; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i32> poison, i32 [[TMP8]], i32 0
; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i32> [[TMP12]], i32 [[TMP9]], i32 1
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP13]], i32 [[TMP10]], i32 2
; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x i32> [[TMP14]], i32 [[TMP11]], i32 3
; CHECK-NEXT: [[TMP16:%.*]] = add <4 x i32> [[TMP15]], [[VEC_PHI]]
; CHECK-NEXT: [[TMP17:%.*]] = or disjoint <4 x i64> [[VEC_IND]], splat (i64 1)
; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i64> [[TMP17]], i32 0
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP18]]
; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i64> [[TMP17]], i32 1
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP20]]
; CHECK-NEXT: [[TMP22:%.*]] = extractelement <4 x i64> [[TMP17]], i32 2
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP22]]
; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i64> [[TMP17]], i32 3
; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP24]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP19]], align 4, !alias.scope [[META16]]
; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP21]], align 4, !alias.scope [[META16]]
; CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP23]], align 4, !alias.scope [[META16]]
; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP25]], align 4, !alias.scope [[META16]]
; CHECK-NEXT: [[TMP30:%.*]] = insertelement <4 x i32> poison, i32 [[TMP26]], i32 0
; CHECK-NEXT: [[TMP31:%.*]] = insertelement <4 x i32> [[TMP30]], i32 [[TMP27]], i32 1
; CHECK-NEXT: [[TMP32:%.*]] = insertelement <4 x i32> [[TMP31]], i32 [[TMP28]], i32 2
; CHECK-NEXT: [[TMP33:%.*]] = insertelement <4 x i32> [[TMP32]], i32 [[TMP29]], i32 3
; CHECK-NEXT: [[TMP34]] = add <4 x i32> [[TMP33]], [[TMP16]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8)
; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
; CHECK-NEXT: br i1 [[TMP35]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP36:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP34]])
; CHECK-NEXT: store i32 [[TMP36]], ptr [[GEP_DST]], align 4, !alias.scope [[META20:![0-9]+]], !noalias [[META16]]
; CHECK-NEXT: br [[EXIT:label %.*]]
; CHECK: [[SCALAR_PH]]:
;
; Positive case: the manually unrolled loop stores to the invariant address
; twice per iteration, but the last store in the body writes the full running
; sum (%sum.2), so the CHECK lines above show the loop is still vectorized
; with a single store of the final reduction in the middle block.
entry:
%gep.dst = getelementptr inbounds i32, ptr %dst, i64 42
br label %for.body

for.body:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
%sum = phi i32 [ 0, %entry ], [ %sum.2, %for.body ]
%gep.src = getelementptr inbounds i32, ptr %src, i64 %iv
%0 = load i32, ptr %gep.src, align 4
%sum.1 = add nsw i32 %0, %sum
; Intermediate store — overwritten by the store of %sum.2 below.
store i32 %sum.1, ptr %gep.dst, align 4
%1 = or disjoint i64 %iv, 1
%gep.src.1 = getelementptr inbounds i32, ptr %src, i64 %1
%2 = load i32, ptr %gep.src.1, align 4
%sum.2 = add nsw i32 %2, %sum.1
store i32 %sum.2, ptr %gep.dst, align 4
%iv.next = add nuw nsw i64 %iv, 2
%cmp = icmp slt i64 %iv.next, 1000
br i1 %cmp, label %for.body, label %exit

exit:
ret void
}
| |
| ; Check that we cannot vectorize code if stored value is not the final reduction |
| ; value |
| ; |
| ; int sum = 0; |
| ; for(int i=0; i < 1000; i++) { |
| ; sum += src[i]; |
| ; dst[42] = sum + 1; |
| ; } |
define void @reduc_store_not_final_value(ptr %dst, ptr readonly %src) {
; CHECK-LABEL: define void @reduc_store_not_final_value(
; CHECK-SAME: ptr [[DST:%.*]], ptr readonly [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 42
; CHECK-NEXT: store i32 0, ptr [[GEP_DST]], align 4
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP_SRC]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[SUM]], [[TMP0]]
; CHECK-NEXT: [[SUM_PLUS_ONE:%.*]] = add i32 [[ADD]], 1
; CHECK-NEXT: store i32 [[SUM_PLUS_ONE]], ptr [[GEP_DST]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT:.*]], label %[[FOR_BODY]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
; Negative case: the value stored to the invariant address is %add + 1, not
; the reduction value %add itself, so the transform does not apply. The CHECK
; lines above show the loop remains scalar.
entry:
%gep.dst = getelementptr inbounds i32, ptr %dst, i64 42
store i32 0, ptr %gep.dst, align 4
br label %for.body

for.body:
%sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
%gep.src = getelementptr inbounds i32, ptr %src, i64 %iv
%0 = load i32, ptr %gep.src, align 4
%add = add nsw i32 %sum, %0
; Stored value differs from the reduction value — blocks vectorization.
%sum_plus_one = add i32 %add, 1
store i32 %sum_plus_one, ptr %gep.dst, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, 1000
br i1 %exitcond, label %exit, label %for.body

exit:
ret void
}
| |
| ; We cannot vectorize if two (or more) invariant stores exist in a loop. |
| ; |
| ; int sum = 0; |
| ; for(int i=0; i < 1000; i+=2) { |
| ; sum += src[i]; |
| ; dst[42] = sum; |
| ; sum += src[i+1]; |
| ; other_dst[42] = sum; |
| ; } |
define void @reduc_double_invariant_store(ptr %dst, ptr %other_dst, ptr readonly %src) {
; CHECK-LABEL: define void @reduc_double_invariant_store(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[OTHER_DST:%.*]], ptr readonly [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 42
; CHECK-NEXT: [[GEP_OTHER_DST:%.*]] = getelementptr inbounds i32, ptr [[OTHER_DST]], i64 42
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[SUM_2:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[SUM_1:%.*]] = add nsw i32 [[TMP0]], [[SUM]]
; CHECK-NEXT: store i32 [[SUM_1]], ptr [[GEP_DST]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = or disjoint i64 [[IV]], 1
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[SUM_2]] = add nsw i32 [[TMP2]], [[SUM_1]]
; CHECK-NEXT: store i32 [[SUM_2]], ptr [[GEP_OTHER_DST]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 2
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IV_NEXT]], 1000
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
; Negative case: the loop stores reduction values to TWO different invariant
; addresses (%gep.dst and %gep.other_dst), which the transform does not
; support. The CHECK lines above show the loop remains scalar.
entry:
%gep.dst = getelementptr inbounds i32, ptr %dst, i64 42
%gep.other_dst = getelementptr inbounds i32, ptr %other_dst, i64 42
br label %for.body

for.body:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
%sum = phi i32 [ 0, %entry ], [ %sum.2, %for.body ]
%arrayidx = getelementptr inbounds i32, ptr %src, i64 %iv
%0 = load i32, ptr %arrayidx, align 4
%sum.1 = add nsw i32 %0, %sum
; First invariant store, to %dst[42].
store i32 %sum.1, ptr %gep.dst, align 4
%1 = or disjoint i64 %iv, 1
%arrayidx4 = getelementptr inbounds i32, ptr %src, i64 %1
%2 = load i32, ptr %arrayidx4, align 4
%sum.2 = add nsw i32 %2, %sum.1
; Second invariant store, to a different address %other_dst[42].
store i32 %sum.2, ptr %gep.other_dst, align 4
%iv.next = add nuw nsw i64 %iv, 2
%cmp = icmp slt i64 %iv.next, 1000
br i1 %cmp, label %for.body, label %exit

exit:
ret void
}
| |
| ; int sum = 0; |
| ; for(int i=0; i < 1000; i+=2) { |
| ; sum += src[i]; |
| ; if (src[i+1] > 0) |
| ; dst[42] = sum; |
| ; sum += src[i+1]; |
| ; dst[42] = sum; |
| ; } |
define void @reduc_store_middle_store_predicated(ptr %dst, ptr readonly %src) {
; CHECK-LABEL: define void @reduc_store_middle_store_predicated(
; CHECK-SAME: ptr [[DST:%.*]], ptr readonly [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr i32, ptr [[DST]], i64 42
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 172
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SRC]], i64 4000
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[GEP_DST]], [[SCEVGEP1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 2, i64 4, i64 6>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP34:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 2
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 6
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP4]], align 4, !alias.scope [[META23:![0-9]+]]
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !alias.scope [[META23]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP6]], align 4, !alias.scope [[META23]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP7]], align 4, !alias.scope [[META23]]
; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i32> poison, i32 [[TMP8]], i32 0
; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i32> [[TMP12]], i32 [[TMP9]], i32 1
; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP13]], i32 [[TMP10]], i32 2
; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x i32> [[TMP14]], i32 [[TMP11]], i32 3
; CHECK-NEXT: [[TMP16:%.*]] = add <4 x i32> [[TMP15]], [[VEC_PHI]]
; CHECK-NEXT: [[TMP17:%.*]] = or disjoint <4 x i64> [[VEC_IND]], splat (i64 1)
; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i64> [[TMP17]], i32 0
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP18]]
; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i64> [[TMP17]], i32 1
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP20]]
; CHECK-NEXT: [[TMP22:%.*]] = extractelement <4 x i64> [[TMP17]], i32 2
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP22]]
; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i64> [[TMP17]], i32 3
; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP24]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP19]], align 4, !alias.scope [[META23]]
; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP21]], align 4, !alias.scope [[META23]]
; CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP23]], align 4, !alias.scope [[META23]]
; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP25]], align 4, !alias.scope [[META23]]
; CHECK-NEXT: [[TMP30:%.*]] = insertelement <4 x i32> poison, i32 [[TMP26]], i32 0
; CHECK-NEXT: [[TMP31:%.*]] = insertelement <4 x i32> [[TMP30]], i32 [[TMP27]], i32 1
; CHECK-NEXT: [[TMP32:%.*]] = insertelement <4 x i32> [[TMP31]], i32 [[TMP28]], i32 2
; CHECK-NEXT: [[TMP33:%.*]] = insertelement <4 x i32> [[TMP32]], i32 [[TMP29]], i32 3
; CHECK-NEXT: [[TMP34]] = add <4 x i32> [[TMP33]], [[TMP16]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8)
; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
; CHECK-NEXT: br i1 [[TMP35]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP36:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP34]])
; CHECK-NEXT: store i32 [[TMP36]], ptr [[GEP_DST]], align 4, !alias.scope [[META27:![0-9]+]], !noalias [[META23]]
; CHECK-NEXT: br [[EXIT:label %.*]]
; CHECK: [[SCALAR_PH]]:
;
; Positive case: the MIDDLE store to the invariant address is predicated, but
; the FINAL store in the latch is unconditional and writes the full running
; sum, so the predicated store can be dropped. The CHECK lines above show the
; loop is vectorized with a single middle-block store of the reduction.
entry:
%gep.dst = getelementptr inbounds i32, ptr %dst, i64 42
br label %for.body

for.body: ; preds = %latch, %entry
%iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
%sum = phi i32 [ 0, %entry ], [ %sum.2, %latch ]
%gep.src = getelementptr inbounds i32, ptr %src, i64 %iv
%0 = load i32, ptr %gep.src, align 4
%sum.1 = add nsw i32 %0, %sum
%cmp = icmp sgt i32 %0, 0
br i1 %cmp, label %predicated, label %latch

predicated: ; preds = %for.body
; Conditional intermediate store — superseded by the unconditional one below.
store i32 %sum.1, ptr %gep.dst, align 4
br label %latch

latch: ; preds = %predicated, %for.body
%1 = or disjoint i64 %iv, 1
%gep.src.1 = getelementptr inbounds i32, ptr %src, i64 %1
%2 = load i32, ptr %gep.src.1, align 4
%sum.2 = add nsw i32 %2, %sum.1
store i32 %sum.2, ptr %gep.dst, align 4
%iv.next = add nuw nsw i64 %iv, 2
%cmp.1 = icmp slt i64 %iv.next, 1000
br i1 %cmp.1, label %for.body, label %exit

exit: ; preds = %latch
ret void
}
| |
| ; int sum = 0; |
| ; for(int i=0; i < 1000; i+=2) { |
| ; sum += src[i]; |
| ; dst[42] = sum; |
| ; sum += src[i+1]; |
| ; if (src[i+1] > 0) |
| ; dst[42] = sum; |
| ; } |
define void @reduc_store_final_store_predicated(ptr %dst, ptr readonly %src) {
; CHECK-LABEL: define void @reduc_store_final_store_predicated(
; CHECK-SAME: ptr [[DST:%.*]], ptr readonly [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 42
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[SUM_1:%.*]], %[[LATCH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[SUM_1]] = add nsw i32 [[TMP0]], [[SUM]]
; CHECK-NEXT: store i32 [[SUM_1]], ptr [[GEP_DST]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = or disjoint i64 [[IV]], 1
; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[GEP_SRC_1]], align 4
; CHECK-NEXT: [[SUM_2:%.*]] = add nsw i32 [[TMP2]], [[SUM_1]]
; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP2]], 0
; CHECK-NEXT: br i1 [[CMP1]], label %[[PREDICATED:.*]], label %[[LATCH]]
; CHECK: [[PREDICATED]]:
; CHECK-NEXT: store i32 [[SUM_2]], ptr [[GEP_DST]], align 4
; CHECK-NEXT: br label %[[LATCH]]
; CHECK: [[LATCH]]:
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 2
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IV_NEXT]], 1000
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  ; The stored-to address &dst[42] is loop-invariant (computed once here).
  %gep.dst = getelementptr inbounds i32, ptr %dst, i64 42
  br label %for.body

for.body: ; preds = %latch, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
  ; NOTE(review): the backedge carries %sum.1, not %sum.2, so the value stored
  ; in %predicated does not feed the recurrence. This differs from the C
  ; pseudocode above (sum += src[i+1]) -- presumably intentional for this
  ; test; confirm against the original intent.
  %sum = phi i32 [ 0, %entry ], [ %sum.1, %latch ]
  %arrayidx = getelementptr inbounds i32, ptr %src, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %sum.1 = add nsw i32 %0, %sum
  ; Unconditional store of the running sum to the invariant address.
  store i32 %sum.1, ptr %gep.dst, align 4
  %1 = or disjoint i64 %iv, 1
  %gep.src.1 = getelementptr inbounds i32, ptr %src, i64 %1
  %2 = load i32, ptr %gep.src.1, align 4
  %sum.2 = add nsw i32 %2, %sum.1
  %cmp1 = icmp sgt i32 %2, 0
  br i1 %cmp1, label %predicated, label %latch

predicated: ; preds = %for.body
  ; The last store to dst[42] in an iteration is predicated; per the CHECK
  ; lines above, the loop is left scalar (no vector body is generated).
  store i32 %sum.2, ptr %gep.dst, align 4
  br label %latch

latch: ; preds = %predicated, %for.body
  %iv.next = add nuw nsw i64 %iv, 2
  %cmp = icmp slt i64 %iv.next, 1000
  br i1 %cmp, label %for.body, label %exit

exit: ; preds = %latch
  ret void
}
| |
| ; Final reduction value is overwritten inside loop |
| ; |
| ; for(int i=0; i < 1000; i++) { |
| ; sum += src[i]; |
| ; dst[42] = sum; |
| ; dst[42] = 0; |
| ; } |
define void @reduc_store_final_store_overwritten(ptr %dst, ptr readonly %src) {
; CHECK-LABEL: define void @reduc_store_final_store_overwritten(
; CHECK-SAME: ptr [[DST:%.*]], ptr readonly [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 42
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP_SRC]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[SUM]], [[TMP0]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP_DST]], align 4
; CHECK-NEXT: store i32 0, ptr [[GEP_DST]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT:.*]], label %[[FOR_BODY]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  %gep.dst = getelementptr inbounds i32, ptr %dst, i64 42
  br label %for.body

for.body:
  %sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %gep.src = getelementptr inbounds i32, ptr %src, i64 %iv
  %0 = load i32, ptr %gep.src, align 4
  %add = add nsw i32 %sum, %0
  ; The reduction store is immediately clobbered by the constant store below,
  ; so dst[42] never holds the final reduction value. Per the CHECK lines,
  ; the loop is left scalar.
  store i32 %add, ptr %gep.dst, align 4
  store i32 0, ptr %gep.dst, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %exit, label %for.body

exit:
  ret void
}
| |
| ; Final value used outside of loop does not prevent vectorization |
| ; |
| ; int sum = 0; |
| ; for(int i=0; i < 1000; i++) { |
| ; sum += src[i]; |
| ; dst[42] = sum; |
| ; } |
| ; dst[43] = sum; |
define void @reduc_store_inoutside(ptr %dst, ptr readonly %src) {
; CHECK-LABEL: define void @reduc_store_inoutside(
; CHECK-SAME: ptr [[DST:%.*]], ptr readonly [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 42
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 172
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SRC]], i64 4000
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[GEP_DST]], [[SCEVGEP1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP1:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4, !alias.scope [[META30:![0-9]+]]
; CHECK-NEXT: [[TMP1]] = add <4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP1]])
; CHECK-NEXT: store i32 [[TMP3]], ptr [[GEP_DST]], align 4, !alias.scope [[META34:![0-9]+]], !noalias [[META30]]
; CHECK-NEXT: br [[EXIT:label %.*]]
; CHECK: [[SCALAR_PH]]:
;
entry:
  %gep.dst = getelementptr inbounds i32, ptr %dst, i64 42
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum = phi i32 [ 0, %entry ], [ %sum.1, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %src, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %sum.1 = add nsw i32 %0, %sum
  ; Reduction value stored to the invariant address inside the loop; per the
  ; CHECK lines, the loop is vectorized (with runtime alias checks) and the
  ; store is sunk into the middle block after the reduction.
  store i32 %sum.1, ptr %gep.dst, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %exit, label %for.body

exit:
  ; The final reduction value is also used outside the loop: it is stored a
  ; second time, at dst[43], via the LCSSA phi below.
  %sum.lcssa = phi i32 [ %sum.1, %for.body ]
  %gep.dst.1 = getelementptr inbounds i32, ptr %dst, i64 43
  store i32 %sum.lcssa, ptr %gep.dst.1, align 4
  ret void
}
| |
| ; Test for PR55540. |
define void @test_drop_poison_generating_dead_recipe(ptr %dst) {
; CHECK-LABEL: define void @test_drop_poison_generating_dead_recipe(
; CHECK-SAME: ptr [[DST:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP0:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0]] = add <4 x i64> [[VEC_PHI]], splat (i64 -31364)
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 360
; CHECK-NEXT: br i1 [[TMP1]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP0]])
; CHECK-NEXT: store i64 [[TMP2]], ptr [[DST]], align 8
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
;
entry:
  br label %body

body:
  %red = phi i64 [ 0, %entry ], [ %red.next, %body ]
  %iv = phi i32 [ 2, %entry ], [ %iv.next, %body ]
  ; This add carries nuw (a poison-generating flag) and its store to %dst is
  ; clobbered by the %red.next store below in the same iteration, so the
  ; corresponding recipe is dead: the CHECK vector body contains only the
  ; splat(-31364) add. Regression test for PR55540.
  %add.1 = add nuw i64 %red, -23523
  store i64 %add.1, ptr %dst, align 8
  %red.next = add nuw i64 %red, -31364
  store i64 %red.next, ptr %dst, align 8
  %iv.next = add nuw nsw i32 %iv, 1
  %ec = icmp ugt i32 %iv, 363
  br i1 %ec, label %exit, label %body

exit:
  ret void
}
| |
define void @reduc_store_invariant_addr_not_hoisted(ptr %dst, ptr readonly %src) {
; CHECK-LABEL: define void @reduc_store_invariant_addr_not_hoisted(
; CHECK-SAME: ptr [[DST:%.*]], ptr readonly [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP_SRC]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[SUM]], [[TMP0]]
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 42
; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP_DST]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT:.*]], label %[[FOR_BODY]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %gep.src = getelementptr inbounds i32, ptr %src, i64 %iv
  %0 = load i32, ptr %gep.src, align 4
  %add = add nsw i32 %sum, %0
  ; Unlike the earlier tests, the invariant address &dst[42] is computed
  ; inside the loop body rather than in the preheader. Per the CHECK lines,
  ; the loop is left scalar (the reduction store is not vectorized here).
  %gep.dst = getelementptr inbounds i32, ptr %dst, i64 42
  store i32 %add, ptr %gep.dst, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %exit, label %for.body

exit:
  ret void
}
| |
| ; Make sure we can vectorize loop with a non-reduction value stored to an |
| ; invariant address that is calculated inside loop. |
define i32 @non_reduc_store_invariant_addr_not_hoisted(ptr %dst, ptr readonly %src) {
; CHECK-LABEL: define i32 @non_reduc_store_invariant_addr_not_hoisted(
; CHECK-SAME: ptr [[DST:%.*]], ptr readonly [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr nuw i8, ptr [[DST]], i64 168
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[DST]], i64 172
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[SRC]], i64 4000
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP1]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 42
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4, !alias.scope [[META39:![0-9]+]]
; CHECK-NEXT: [[TMP2]] = add <4 x i32> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT: store i32 0, ptr [[TMP0]], align 4, !alias.scope [[META42:![0-9]+]], !noalias [[META39]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
; CHECK-NEXT: br [[EXIT:label %.*]]
; CHECK: [[SCALAR_PH]]:
;
entry:
  br label %for.body

for.body: ; preds = %for.body, %entry
  %sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %gep.src = getelementptr inbounds i32, ptr %src, i64 %iv
  %0 = load i32, ptr %gep.src, align 4
  %add = add nsw i32 %sum, %0
  ; The stored value is the constant 0, not the reduction: %add is only
  ; returned from the function. Per the CHECK lines, the loop is vectorized;
  ; the invariant address computation is hoisted into the vector preheader and
  ; the constant store stays inside the vector body.
  %gep.dst = getelementptr inbounds i32, ptr %dst, i64 42
  store i32 0, ptr %gep.dst, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %exit, label %for.body

exit: ; preds = %for.body
  %add.lcssa = phi i32 [ %add, %for.body ]
  ret i32 %add.lcssa
}
| |
; Make sure that if there are several reductions in the loop, the order of the invariant stores sunk outside of the loop is preserved.
| ; See https://github.com/llvm/llvm-project/issues/64047 |
define void @reduc_add_mul_store_same_ptr(ptr %dst, ptr readonly %src) {
; CHECK-LABEL: define void @reduc_add_mul_store_same_ptr(
; CHECK-SAME: ptr [[DST:%.*]], ptr readonly [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 4
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SRC]], i64 4000
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP1:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ splat (i32 1), %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4, !alias.scope [[META46:![0-9]+]]
; CHECK-NEXT: [[TMP1]] = add <4 x i32> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP2]] = mul <4 x i32> [[VEC_PHI2]], [[WIDE_LOAD]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP49:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP1]])
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP2]])
; CHECK-NEXT: store i32 [[TMP4]], ptr [[DST]], align 4, !alias.scope [[META50:![0-9]+]], !noalias [[META46]]
; CHECK-NEXT: store i32 [[TMP5]], ptr [[DST]], align 4, !alias.scope [[META50]], !noalias [[META46]]
; CHECK-NEXT: br [[EXIT:label %.*]]
; CHECK: [[SCALAR_PH]]:
;
entry:
  br label %for.body

for.body:
  %sum = phi i32 [ 0, %entry ], [ %sum.next, %for.body ]
  %mul = phi i32 [ 1, %entry ], [ %mul.next, %for.body ]
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %gep.src = getelementptr inbounds i32, ptr %src, i64 %iv
  %0 = load i32, ptr %gep.src, align 4
  ; Two reductions (add, then mul) are stored to the SAME pointer. Per the
  ; CHECK middle block, the sunk stores keep the source order: the add result
  ; is stored first, then the mul result, so %dst ends up with the mul value.
  %sum.next = add nsw i32 %sum, %0
  store i32 %sum.next, ptr %dst, align 4
  %mul.next = mul nsw i32 %mul, %0
  store i32 %mul.next, ptr %dst, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %exit, label %for.body

exit:
  ret void
}
| |
define void @reduc_mul_add_store_same_ptr(ptr %dst, ptr readonly %src) {
; CHECK-LABEL: define void @reduc_mul_add_store_same_ptr(
; CHECK-SAME: ptr [[DST:%.*]], ptr readonly [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 4
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SRC]], i64 4000
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ splat (i32 1), %[[VECTOR_PH]] ], [ [[TMP1:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4, !alias.scope [[META53:![0-9]+]]
; CHECK-NEXT: [[TMP1]] = mul <4 x i32> [[VEC_PHI2]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP2]] = add <4 x i32> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP56:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP1]])
; CHECK-NEXT: store i32 [[TMP5]], ptr [[DST]], align 4, !alias.scope [[META57:![0-9]+]], !noalias [[META53]]
; CHECK-NEXT: store i32 [[TMP4]], ptr [[DST]], align 4, !alias.scope [[META57]], !noalias [[META53]]
; CHECK-NEXT: br [[EXIT:label %.*]]
; CHECK: [[SCALAR_PH]]:
;
entry:
  br label %for.body

for.body:
  %sum = phi i32 [ 0, %entry ], [ %sum.next, %for.body ]
  %mul = phi i32 [ 1, %entry ], [ %mul.next, %for.body ]
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %gep.src = getelementptr inbounds i32, ptr %src, i64 %iv
  %0 = load i32, ptr %gep.src, align 4
  ; Mirror of @reduc_add_mul_store_same_ptr with the store order swapped:
  ; mul first, then add. Per the CHECK middle block, the sunk stores preserve
  ; this order (mul result stored first, then the add result).
  %mul.next = mul nsw i32 %mul, %0
  store i32 %mul.next, ptr %dst, align 4
  %sum.next = add nsw i32 %sum, %0
  store i32 %sum.next, ptr %dst, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %exit, label %for.body

exit:
  ret void
}
| |
| ; Same as above but storing is done to two different pointers and they can be aliased |
define void @reduc_add_mul_store_different_ptr(ptr %dst1, ptr %dst2, ptr readonly %src) {
; CHECK-LABEL: define void @reduc_add_mul_store_different_ptr(
; CHECK-SAME: ptr [[DST1:%.*]], ptr [[DST2:%.*]], ptr readonly [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST1]], i64 4
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[DST2]], i64 4
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[SRC]], i64 4000
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[DST1]], [[SCEVGEP1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[DST2]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: [[BOUND03:%.*]] = icmp ult ptr [[DST1]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND14:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT5:%.*]] = and i1 [[BOUND03]], [[BOUND14]]
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]]
; CHECK-NEXT: [[BOUND06:%.*]] = icmp ult ptr [[DST2]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND17:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP1]]
; CHECK-NEXT: [[FOUND_CONFLICT8:%.*]] = and i1 [[BOUND06]], [[BOUND17]]
; CHECK-NEXT: [[CONFLICT_RDX9:%.*]] = or i1 [[CONFLICT_RDX]], [[FOUND_CONFLICT8]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX9]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP1:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI10:%.*]] = phi <4 x i32> [ splat (i32 1), %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4, !alias.scope [[META60:![0-9]+]]
; CHECK-NEXT: [[TMP1]] = add <4 x i32> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP2]] = mul <4 x i32> [[VEC_PHI10]], [[WIDE_LOAD]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP63:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP1]])
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP2]])
; CHECK-NEXT: store i32 [[TMP4]], ptr [[DST1]], align 4, !alias.scope [[META64:![0-9]+]], !noalias [[META66:![0-9]+]]
; CHECK-NEXT: store i32 [[TMP5]], ptr [[DST2]], align 4, !alias.scope [[META68:![0-9]+]], !noalias [[META60]]
; CHECK-NEXT: br [[EXIT:label %.*]]
; CHECK: [[SCALAR_PH]]:
;
entry:
  br label %for.body

for.body:
  %sum = phi i32 [ 0, %entry ], [ %sum.next, %for.body ]
  %mul = phi i32 [ 1, %entry ], [ %mul.next, %for.body ]
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %gep.src = getelementptr inbounds i32, ptr %src, i64 %iv
  %0 = load i32, ptr %gep.src, align 4
  ; The two reduction stores target two distinct pointers (%dst1, %dst2) that
  ; have no noalias attributes, so per the CHECK lines the memcheck block
  ; emits pairwise runtime checks for dst1/dst2, dst1/src and dst2/src, and
  ; the sunk stores in the middle block keep the source order.
  %sum.next = add nsw i32 %sum, %0
  store i32 %sum.next, ptr %dst1, align 4
  %mul.next = mul nsw i32 %mul, %0
  store i32 %mul.next, ptr %dst2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %exit, label %for.body

exit:
  ret void
}
| |
define void @reduc_mul_add_store_different_ptr(ptr %dst1, ptr %dst2, ptr readonly %src) {
; CHECK-LABEL: define void @reduc_mul_add_store_different_ptr(
; CHECK-SAME: ptr [[DST1:%.*]], ptr [[DST2:%.*]], ptr readonly [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST1]], i64 4
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[DST2]], i64 4
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[SRC]], i64 4000
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[DST1]], [[SCEVGEP1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[DST2]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: [[BOUND03:%.*]] = icmp ult ptr [[DST1]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND14:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT5:%.*]] = and i1 [[BOUND03]], [[BOUND14]]
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]]
; CHECK-NEXT: [[BOUND06:%.*]] = icmp ult ptr [[DST2]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND17:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP1]]
; CHECK-NEXT: [[FOUND_CONFLICT8:%.*]] = and i1 [[BOUND06]], [[BOUND17]]
; CHECK-NEXT: [[CONFLICT_RDX9:%.*]] = or i1 [[CONFLICT_RDX]], [[FOUND_CONFLICT8]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX9]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI10:%.*]] = phi <4 x i32> [ splat (i32 1), %[[VECTOR_PH]] ], [ [[TMP1:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4, !alias.scope [[META70:![0-9]+]]
; CHECK-NEXT: [[TMP1]] = mul <4 x i32> [[VEC_PHI10]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP2]] = add <4 x i32> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP73:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP1]])
; CHECK-NEXT: store i32 [[TMP5]], ptr [[DST1]], align 4, !alias.scope [[META74:![0-9]+]], !noalias [[META76:![0-9]+]]
; CHECK-NEXT: store i32 [[TMP4]], ptr [[DST2]], align 4, !alias.scope [[META78:![0-9]+]], !noalias [[META70]]
; CHECK-NEXT: br [[EXIT:label %.*]]
; CHECK: [[SCALAR_PH]]:
;
entry:
  br label %for.body

for.body:
  %sum = phi i32 [ 0, %entry ], [ %sum.next, %for.body ]
  %mul = phi i32 [ 1, %entry ], [ %mul.next, %for.body ]
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %gep.src = getelementptr inbounds i32, ptr %src, i64 %iv
  %0 = load i32, ptr %gep.src, align 4
  ; Mirror of @reduc_add_mul_store_different_ptr with the store order swapped:
  ; the mul reduction is stored to %dst1 first, then the add reduction to
  ; %dst2. Per the CHECK middle block, the sunk stores keep this order.
  %mul.next = mul nsw i32 %mul, %0
  store i32 %mul.next, ptr %dst1, align 4
  %sum.next = add nsw i32 %sum, %0
  store i32 %sum.next, ptr %dst2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %exit, label %for.body

exit:
  ret void
}