| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2 |
| ; RUN: opt -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S %s | FileCheck %s |
| |
| target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128-ni:1-p2:32:8:8:32-ni:2" |
| target triple = "x86_64-apple-macos" |
| |
; Both %l3 and the earlier store to %gep.iv.2 access the same memory location.
; %l1 cannot safely be added to the same interleave group as %l2 and %l3,
; because that would mean hoisting %l2 and %l3 across the store.
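;
; For illustration, in the first scalar iteration (%iv.1 = 1, %iv.2 = 4): %l1
; loads arr[5], the store writes arr[4], %l2 loads arr[3] and %l3 loads arr[4].
; Since %iv.2 == %iv.1 + 3 in every iteration, %l3 always re-reads the element
; the store just wrote, so hoisting it up to %l1's position would read a stale
; value.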
| define void @pr63602_1(ptr %arr) { |
| ; CHECK-LABEL: define void @pr63602_1 |
| ; CHECK-SAME: (ptr [[ARR:%.*]]) { |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; CHECK: vector.ph: |
| ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; CHECK: vector.body: |
| ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3 |
| ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[TMP0]] |
| ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 0 |
| ; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[INDEX]], 3 |
| ; CHECK-NEXT: [[OFFSET_IDX2:%.*]] = add i64 4, [[TMP2]] |
| ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX2]], 0 |
| ; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX2]], 3 |
| ; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX2]], 6 |
| ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX2]], 9 |
| ; CHECK-NEXT: [[TMP7:%.*]] = add nuw nsw i64 [[TMP1]], 4 |
| ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP7]] |
| ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <12 x i32>, ptr [[TMP8]], align 4 |
| ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9> |
| ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP3]] |
| ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP4]] |
| ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP5]] |
| ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP6]] |
| ; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 0 |
| ; CHECK-NEXT: store i32 [[TMP14]], ptr [[TMP10]], align 4 |
| ; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 1 |
| ; CHECK-NEXT: store i32 [[TMP15]], ptr [[TMP11]], align 4 |
| ; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 2 |
| ; CHECK-NEXT: store i32 [[TMP16]], ptr [[TMP12]], align 4 |
| ; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 3 |
| ; CHECK-NEXT: store i32 [[TMP17]], ptr [[TMP13]], align 4 |
| ; CHECK-NEXT: [[TMP18:%.*]] = add nuw nsw i64 [[TMP1]], 2 |
| ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP18]] |
| ; CHECK-NEXT: [[WIDE_VEC3:%.*]] = load <12 x i32>, ptr [[TMP19]], align 4 |
| ; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <12 x i32> [[WIDE_VEC3]], <12 x i32> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9> |
| ; CHECK-NEXT: [[STRIDED_VEC5:%.*]] = shufflevector <12 x i32> [[WIDE_VEC3]], <12 x i32> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10> |
| ; CHECK-NEXT: [[TMP21:%.*]] = add <4 x i32> [[STRIDED_VEC5]], [[STRIDED_VEC4]] |
| ; CHECK-NEXT: [[TMP22:%.*]] = extractelement <4 x i32> [[TMP21]], i32 0 |
| ; CHECK-NEXT: store i32 [[TMP22]], ptr [[TMP10]], align 4 |
| ; CHECK-NEXT: [[TMP23:%.*]] = extractelement <4 x i32> [[TMP21]], i32 1 |
| ; CHECK-NEXT: store i32 [[TMP23]], ptr [[TMP11]], align 4 |
| ; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i32> [[TMP21]], i32 2 |
| ; CHECK-NEXT: store i32 [[TMP24]], ptr [[TMP12]], align 4 |
| ; CHECK-NEXT: [[TMP25:%.*]] = extractelement <4 x i32> [[TMP21]], i32 3 |
| ; CHECK-NEXT: store i32 [[TMP25]], ptr [[TMP13]], align 4 |
| ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 |
| ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 |
| ; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] |
| ; CHECK: middle.block: |
| ; CHECK-NEXT: br label [[SCALAR_PH]] |
| ; CHECK: scalar.ph: |
| ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 49, [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY:%.*]] ] |
| ; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 52, [[MIDDLE_BLOCK]] ], [ 4, [[ENTRY]] ] |
| ; CHECK-NEXT: br label [[LOOP:%.*]] |
| ; CHECK: loop: |
| ; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ] |
| ; CHECK-NEXT: [[IV_2:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ] |
| ; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 3 |
| ; CHECK-NEXT: [[IV_1_PLUS_4:%.*]] = add nuw nsw i64 [[IV_1]], 4 |
| ; CHECK-NEXT: [[GEP_IV_1_PLUS_4:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IV_1_PLUS_4]] |
| ; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[GEP_IV_1_PLUS_4]], align 4 |
| ; CHECK-NEXT: [[GEP_IV_2:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IV_2]] |
| ; CHECK-NEXT: store i32 [[L1]], ptr [[GEP_IV_2]], align 4 |
| ; CHECK-NEXT: [[IV_1_PLUS_2:%.*]] = add nuw nsw i64 [[IV_1]], 2 |
| ; CHECK-NEXT: [[GEP_IV_1_PLUS_2:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IV_1_PLUS_2]] |
| ; CHECK-NEXT: [[L2:%.*]] = load i32, ptr [[GEP_IV_1_PLUS_2]], align 4 |
| ; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[GEP_IV_2]], align 4 |
| ; CHECK-NEXT: [[ADD:%.*]] = add i32 [[L3]], [[L2]] |
| ; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP_IV_2]], align 4 |
| ; CHECK-NEXT: [[IV_2_NEXT]] = add nuw nsw i64 [[IV_2]], 3 |
| ; CHECK-NEXT: [[ICMP:%.*]] = icmp ugt i64 [[IV_2]], 50 |
| ; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; CHECK: exit: |
| ; CHECK-NEXT: ret void |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %iv.1 = phi i64 [ 1, %entry ], [ %iv.1.next, %loop ] |
| %iv.2 = phi i64 [ 4, %entry ], [ %iv.2.next, %loop ] |
| %iv.1.next = add nuw nsw i64 %iv.1, 3 |
| %iv.1.plus.4 = add nuw nsw i64 %iv.1, 4 |
| %gep.iv.1.plus.4 = getelementptr inbounds i32, ptr %arr, i64 %iv.1.plus.4 |
| %l1 = load i32, ptr %gep.iv.1.plus.4 |
| %gep.iv.2 = getelementptr inbounds i32, ptr %arr, i64 %iv.2 |
| store i32 %l1, ptr %gep.iv.2 |
| %iv.1.plus.2 = add nuw nsw i64 %iv.1, 2 |
  %gep.iv.1.plus.2 = getelementptr inbounds i32, ptr %arr, i64 %iv.1.plus.2
| %l2 = load i32, ptr %gep.iv.1.plus.2 |
| %l3 = load i32, ptr %gep.iv.2 |
  %add = add i32 %l3, %l2
| store i32 %add, ptr %gep.iv.2 |
| %iv.2.next = add nuw nsw i64 %iv.2, 3 |
| %icmp = icmp ugt i64 %iv.2, 50 |
| br i1 %icmp, label %exit, label %loop |
| |
| exit: |
| ret void |
| } |
| |
; %l3 and the preceding store access the same memory location, so the loads
; %l1, %l2 and %l3 cannot all be placed in the same interleave group: that
; would mean hoisting the loads %l2 and %l3 across the store.

; Unlike the case above, when we visit the last load in program order (%l2) and
; compare it against the obstructing store, there is no dependency. However,
; the other load in %l2's interleave group (%l3) does conflict with the store.
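;
; For illustration, in the first scalar iteration (%iv.1 = 1, %iv.2 = 4) the
; store writes arr[4], %l3 then re-reads arr[4] and %l2 loads arr[3]. %l2 on
; its own is independent of the store, but its group member %l3 is not, so the
; group is invalidated and the %l2/%l3 loads are emitted as scalar loads and
; gathered in the vector body checked below.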
| define void @pr63602_2(ptr %arr) { |
| ; CHECK-LABEL: define void @pr63602_2 |
| ; CHECK-SAME: (ptr [[ARR:%.*]]) { |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] |
| ; CHECK: vector.ph: |
| ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; CHECK: vector.body: |
| ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3 |
| ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[TMP0]] |
| ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 0 |
| ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 3 |
| ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 6 |
| ; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 9 |
| ; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[INDEX]], 3 |
| ; CHECK-NEXT: [[OFFSET_IDX2:%.*]] = add i64 4, [[TMP5]] |
| ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX2]], 0 |
| ; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX2]], 3 |
| ; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX2]], 6 |
| ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX2]], 9 |
| ; CHECK-NEXT: [[TMP10:%.*]] = add nuw nsw i64 [[TMP1]], 4 |
| ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP10]] |
| ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <12 x i32>, ptr [[TMP11]], align 4 |
| ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9> |
| ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP6]] |
| ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP7]] |
| ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP8]] |
| ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP9]] |
| ; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 0 |
| ; CHECK-NEXT: store i32 [[TMP17]], ptr [[TMP13]], align 4 |
| ; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 1 |
| ; CHECK-NEXT: store i32 [[TMP18]], ptr [[TMP14]], align 4 |
| ; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 2 |
| ; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP15]], align 4 |
| ; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 3 |
| ; CHECK-NEXT: store i32 [[TMP20]], ptr [[TMP16]], align 4 |
| ; CHECK-NEXT: [[TMP21:%.*]] = add nuw nsw i64 [[TMP1]], 2 |
| ; CHECK-NEXT: [[TMP22:%.*]] = add nuw nsw i64 [[TMP2]], 2 |
| ; CHECK-NEXT: [[TMP23:%.*]] = add nuw nsw i64 [[TMP3]], 2 |
| ; CHECK-NEXT: [[TMP24:%.*]] = add nuw nsw i64 [[TMP4]], 2 |
| ; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP21]] |
| ; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP22]] |
| ; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP23]] |
| ; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP24]] |
| ; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP13]], align 4 |
| ; CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP14]], align 4 |
| ; CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP15]], align 4 |
| ; CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP16]], align 4 |
| ; CHECK-NEXT: [[TMP33:%.*]] = insertelement <4 x i32> poison, i32 [[TMP29]], i32 0 |
| ; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP33]], i32 [[TMP30]], i32 1 |
| ; CHECK-NEXT: [[TMP35:%.*]] = insertelement <4 x i32> [[TMP34]], i32 [[TMP31]], i32 2 |
| ; CHECK-NEXT: [[TMP36:%.*]] = insertelement <4 x i32> [[TMP35]], i32 [[TMP32]], i32 3 |
| ; CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP25]], align 4 |
| ; CHECK-NEXT: [[TMP38:%.*]] = load i32, ptr [[TMP26]], align 4 |
| ; CHECK-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP27]], align 4 |
| ; CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[TMP28]], align 4 |
| ; CHECK-NEXT: [[TMP41:%.*]] = insertelement <4 x i32> poison, i32 [[TMP37]], i32 0 |
| ; CHECK-NEXT: [[TMP42:%.*]] = insertelement <4 x i32> [[TMP41]], i32 [[TMP38]], i32 1 |
| ; CHECK-NEXT: [[TMP43:%.*]] = insertelement <4 x i32> [[TMP42]], i32 [[TMP39]], i32 2 |
| ; CHECK-NEXT: [[TMP44:%.*]] = insertelement <4 x i32> [[TMP43]], i32 [[TMP40]], i32 3 |
| ; CHECK-NEXT: [[TMP45:%.*]] = add <4 x i32> [[TMP36]], [[TMP44]] |
| ; CHECK-NEXT: [[TMP46:%.*]] = extractelement <4 x i32> [[TMP45]], i32 0 |
| ; CHECK-NEXT: store i32 [[TMP46]], ptr [[TMP13]], align 4 |
| ; CHECK-NEXT: [[TMP47:%.*]] = extractelement <4 x i32> [[TMP45]], i32 1 |
| ; CHECK-NEXT: store i32 [[TMP47]], ptr [[TMP14]], align 4 |
| ; CHECK-NEXT: [[TMP48:%.*]] = extractelement <4 x i32> [[TMP45]], i32 2 |
| ; CHECK-NEXT: store i32 [[TMP48]], ptr [[TMP15]], align 4 |
| ; CHECK-NEXT: [[TMP49:%.*]] = extractelement <4 x i32> [[TMP45]], i32 3 |
| ; CHECK-NEXT: store i32 [[TMP49]], ptr [[TMP16]], align 4 |
| ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 |
| ; CHECK-NEXT: [[TMP50:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 |
| ; CHECK-NEXT: br i1 [[TMP50]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] |
| ; CHECK: middle.block: |
| ; CHECK-NEXT: br label [[SCALAR_PH]] |
| ; CHECK: scalar.ph: |
| ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 49, [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY:%.*]] ] |
| ; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 52, [[MIDDLE_BLOCK]] ], [ 4, [[ENTRY]] ] |
| ; CHECK-NEXT: br label [[LOOP:%.*]] |
| ; CHECK: loop: |
| ; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ] |
| ; CHECK-NEXT: [[IV_2:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ] |
| ; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 3 |
| ; CHECK-NEXT: [[IV_1_PLUS_4:%.*]] = add nuw nsw i64 [[IV_1]], 4 |
| ; CHECK-NEXT: [[GEP_IV_1_PLUS_4:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IV_1_PLUS_4]] |
| ; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[GEP_IV_1_PLUS_4]], align 4 |
| ; CHECK-NEXT: [[GEP_IV_2:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IV_2]] |
| ; CHECK-NEXT: store i32 [[L1]], ptr [[GEP_IV_2]], align 4 |
| ; CHECK-NEXT: [[IV_1_PLUS_2:%.*]] = add nuw nsw i64 [[IV_1]], 2 |
| ; CHECK-NEXT: [[GEP_IV_1_PLUS_2:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IV_1_PLUS_2]] |
| ; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[GEP_IV_2]], align 4 |
| ; CHECK-NEXT: [[L2:%.*]] = load i32, ptr [[GEP_IV_1_PLUS_2]], align 4 |
| ; CHECK-NEXT: [[ADD:%.*]] = add i32 [[L3]], [[L2]] |
| ; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP_IV_2]], align 4 |
| ; CHECK-NEXT: [[IV_2_NEXT]] = add nuw nsw i64 [[IV_2]], 3 |
| ; CHECK-NEXT: [[ICMP:%.*]] = icmp ugt i64 [[IV_2]], 50 |
| ; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] |
| ; CHECK: exit: |
| ; CHECK-NEXT: ret void |
| ; |
| entry: |
| br label %loop |
| |
| loop: |
| %iv.1 = phi i64 [ 1, %entry ], [ %iv.1.next, %loop ] |
| %iv.2 = phi i64 [ 4, %entry ], [ %iv.2.next, %loop ] |
| %iv.1.next = add nuw nsw i64 %iv.1, 3 |
| %iv.1.plus.4 = add nuw nsw i64 %iv.1, 4 |
| %gep.iv.1.plus.4 = getelementptr inbounds i32, ptr %arr, i64 %iv.1.plus.4 |
| %l1 = load i32, ptr %gep.iv.1.plus.4 |
| %gep.iv.2 = getelementptr inbounds i32, ptr %arr, i64 %iv.2 |
| store i32 %l1, ptr %gep.iv.2 |
| %iv.1.plus.2 = add nuw nsw i64 %iv.1, 2 |
  %gep.iv.1.plus.2 = getelementptr inbounds i32, ptr %arr, i64 %iv.1.plus.2
| %l3 = load i32, ptr %gep.iv.2 |
| %l2 = load i32, ptr %gep.iv.1.plus.2 |
  %add = add i32 %l3, %l2
| store i32 %add, ptr %gep.iv.2 |
| %iv.2.next = add nuw nsw i64 %iv.2, 3 |
| %icmp = icmp ugt i64 %iv.2, 50 |
| br i1 %icmp, label %exit, label %loop |
| |
| exit: |
| ret void |
| } |