; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph:" --version 4
; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -enable-epilogue-vectorization=false -S < %s | FileCheck %s --check-prefixes=CHECK-INTERLEAVE1
; RUN: opt -passes=loop-vectorize -enable-epilogue-vectorization=false -S < %s | FileCheck %s --check-prefixes=CHECK-INTERLEAVED
; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -vectorizer-maximize-bandwidth -enable-epilogue-vectorization=false -S < %s | FileCheck %s --check-prefixes=CHECK-MAXBW
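; The three RUN lines cover a forced interleave count of 1, the cost model's
; default interleaving, and -vectorizer-maximize-bandwidth.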

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-none-unknown-elf"

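; Sums -(zext(a[i]) * zext(b[i])) over 1024 i8 elements. The negated multiply
; should still be recognised as a partial reduction; the checks expect the
; negation (sub 0, mul) to remain in the vector body, feeding
; llvm.vector.partial.reduce.add.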
define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-LABEL: define i32 @dotp(
; CHECK-INTERLEAVE1-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-INTERLEAVE1-NEXT:  entry:
; CHECK-INTERLEAVE1-NEXT:    br label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVE1:       vector.ph:
; CHECK-INTERLEAVE1-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-INTERLEAVE1-NEXT:    [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 4
; CHECK-INTERLEAVE1-NEXT:    [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]]
; CHECK-INTERLEAVE1-NEXT:    [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-INTERLEAVE1-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1:       vector.body:
; CHECK-INTERLEAVE1-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT:    [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT:    [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
; CHECK-INTERLEAVE1-NEXT:    [[TMP10:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT:    [[WIDE_LOAD1:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1
; CHECK-INTERLEAVE1-NEXT:    [[TMP4:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD1]] to <vscale x 16 x i32>
; CHECK-INTERLEAVE1-NEXT:    [[TMP5:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
; CHECK-INTERLEAVE1-NEXT:    [[TMP6:%.*]] = mul <vscale x 16 x i32> [[TMP4]], [[TMP5]]
; CHECK-INTERLEAVE1-NEXT:    [[TMP11:%.*]] = sub <vscale x 16 x i32> zeroinitializer, [[TMP6]]
; CHECK-INTERLEAVE1-NEXT:    [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP11]])
; CHECK-INTERLEAVE1-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
; CHECK-INTERLEAVE1-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-INTERLEAVE1:       middle.block:
; CHECK-INTERLEAVE1-NEXT:    [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]])
; CHECK-INTERLEAVE1-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT:    br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK-INTERLEAVE1:       scalar.ph:
;
; CHECK-INTERLEAVED-LABEL: define i32 @dotp(
; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-INTERLEAVED-NEXT:  entry:
; CHECK-INTERLEAVED-NEXT:    br label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVED:       vector.ph:
; CHECK-INTERLEAVED-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-INTERLEAVED-NEXT:    [[TMP4:%.*]] = shl nuw i64 [[TMP0]], 4
; CHECK-INTERLEAVED-NEXT:    [[TMP1:%.*]] = shl nuw i64 [[TMP4]], 1
; CHECK-INTERLEAVED-NEXT:    [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]]
; CHECK-INTERLEAVED-NEXT:    [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-INTERLEAVED-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED:       vector.body:
; CHECK-INTERLEAVED-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT:    [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT:    [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT:    [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVED-NEXT:    [[TMP5:%.*]] = getelementptr i8, ptr [[TMP7]], i64 [[TMP4]]
; CHECK-INTERLEAVED-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
; CHECK-INTERLEAVED-NEXT:    [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP5]], align 1
; CHECK-INTERLEAVED-NEXT:    [[TMP14:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-INTERLEAVED-NEXT:    [[TMP9:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP4]]
; CHECK-INTERLEAVED-NEXT:    [[WIDE_LOAD3:%.*]] = load <vscale x 16 x i8>, ptr [[TMP14]], align 1
; CHECK-INTERLEAVED-NEXT:    [[WIDE_LOAD4:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
; CHECK-INTERLEAVED-NEXT:    [[TMP10:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD3]] to <vscale x 16 x i32>
; CHECK-INTERLEAVED-NEXT:    [[TMP11:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
; CHECK-INTERLEAVED-NEXT:    [[TMP12:%.*]] = mul <vscale x 16 x i32> [[TMP10]], [[TMP11]]
; CHECK-INTERLEAVED-NEXT:    [[TMP13:%.*]] = sub <vscale x 16 x i32> zeroinitializer, [[TMP12]]
; CHECK-INTERLEAVED-NEXT:    [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP13]])
; CHECK-INTERLEAVED-NEXT:    [[TMP21:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD4]] to <vscale x 16 x i32>
; CHECK-INTERLEAVED-NEXT:    [[TMP15:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD2]] to <vscale x 16 x i32>
; CHECK-INTERLEAVED-NEXT:    [[TMP16:%.*]] = mul <vscale x 16 x i32> [[TMP21]], [[TMP15]]
; CHECK-INTERLEAVED-NEXT:    [[TMP17:%.*]] = sub <vscale x 16 x i32> zeroinitializer, [[TMP16]]
; CHECK-INTERLEAVED-NEXT:    [[PARTIAL_REDUCE5]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI1]], <vscale x 16 x i32> [[TMP17]])
; CHECK-INTERLEAVED-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
; CHECK-INTERLEAVED-NEXT:    [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT:    br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-INTERLEAVED:       middle.block:
; CHECK-INTERLEAVED-NEXT:    [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
; CHECK-INTERLEAVED-NEXT:    [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
; CHECK-INTERLEAVED-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-INTERLEAVED-NEXT:    br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK-INTERLEAVED:       scalar.ph:
;
; CHECK-MAXBW-LABEL: define i32 @dotp(
; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-MAXBW-NEXT:  entry:
; CHECK-MAXBW-NEXT:    br label [[VECTOR_PH:%.*]]
; CHECK-MAXBW:       vector.ph:
; CHECK-MAXBW-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-MAXBW-NEXT:    [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 4
; CHECK-MAXBW-NEXT:    [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT:    [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-MAXBW-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW:       vector.body:
; CHECK-MAXBW-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT:    [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT:    [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
; CHECK-MAXBW-NEXT:    [[TMP10:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT:    [[WIDE_LOAD1:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1
; CHECK-MAXBW-NEXT:    [[TMP4:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD1]] to <vscale x 16 x i32>
; CHECK-MAXBW-NEXT:    [[TMP5:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
; CHECK-MAXBW-NEXT:    [[TMP6:%.*]] = mul <vscale x 16 x i32> [[TMP4]], [[TMP5]]
; CHECK-MAXBW-NEXT:    [[TMP8:%.*]] = sub <vscale x 16 x i32> zeroinitializer, [[TMP6]]
; CHECK-MAXBW-NEXT:    [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP8]])
; CHECK-MAXBW-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT:    [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT:    br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-MAXBW:       middle.block:
; CHECK-MAXBW-NEXT:    [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-MAXBW-NEXT:    br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK-MAXBW:       scalar.ph:
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %accum = phi i32 [ 0, %entry ], [ %add, %for.body ]
  %gep.a = getelementptr i8, ptr %a, i64 %iv
  %load.a = load i8, ptr %gep.a, align 1
  %ext.a = zext i8 %load.a to i32
  %gep.b = getelementptr i8, ptr %b, i64 %iv
  %load.b = load i8, ptr %gep.b, align 1
  %ext.b = zext i8 %load.b to i32
  %mul = mul i32 %ext.b, %ext.a
  %sub = sub i32 0, %mul
  %add = add i32 %accum, %sub
  %iv.next = add i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1024
  br i1 %exitcond.not, label %for.exit, label %for.body

for.exit:                                         ; preds = %for.body
  ret i32 %add
}

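; First-order recurrence feeding a sign-extended multiply: each iteration
; multiplies the previous load by the current one and subtracts the product
; from an i64 accumulator. The checks expect the recurrence to be modelled
; with a vector splice and the subtraction to be folded into a single
; negation of the reduced value in the middle block.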
define i64 @partial_reduce_sub_sext_mul(ptr %x) #1 {
; CHECK-INTERLEAVE1-LABEL: define i64 @partial_reduce_sub_sext_mul(
; CHECK-INTERLEAVE1-SAME: ptr [[X:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-INTERLEAVE1-NEXT:  entry:
; CHECK-INTERLEAVE1-NEXT:    br label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVE1:       vector.ph:
; CHECK-INTERLEAVE1-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1:       vector.body:
; CHECK-INTERLEAVE1-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ <i32 poison, i32 poison, i32 poison, i32 0>, [[VECTOR_PH]] ], [ [[STRIDED_VEC:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT:    [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 1
; CHECK-INTERLEAVE1-NEXT:    [[TMP1:%.*]] = getelementptr [2 x i32], ptr [[X]], i64 [[TMP0]]
; CHECK-INTERLEAVE1-NEXT:    [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4
; CHECK-INTERLEAVE1-NEXT:    [[STRIDED_VEC]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-INTERLEAVE1-NEXT:    [[TMP2:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[STRIDED_VEC]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-INTERLEAVE1-NEXT:    [[TMP3:%.*]] = sext <4 x i32> [[TMP2]] to <4 x i64>
; CHECK-INTERLEAVE1-NEXT:    [[TMP4:%.*]] = sext <4 x i32> [[STRIDED_VEC]] to <4 x i64>
; CHECK-INTERLEAVE1-NEXT:    [[TMP5:%.*]] = mul <4 x i64> [[TMP3]], [[TMP4]]
; CHECK-INTERLEAVE1-NEXT:    [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v4i64(<2 x i64> [[VEC_PHI]], <4 x i64> [[TMP5]])
; CHECK-INTERLEAVE1-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-INTERLEAVE1-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 36
; CHECK-INTERLEAVE1-NEXT:    br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-INTERLEAVE1:       middle.block:
; CHECK-INTERLEAVE1-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]])
; CHECK-INTERLEAVE1-NEXT:    [[TMP9:%.*]] = sub i64 0, [[TMP8]]
; CHECK-INTERLEAVE1-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 3
; CHECK-INTERLEAVE1-NEXT:    br label [[SCALAR_PH:%.*]]
; CHECK-INTERLEAVE1:       scalar.ph:
;
; CHECK-INTERLEAVED-LABEL: define i64 @partial_reduce_sub_sext_mul(
; CHECK-INTERLEAVED-SAME: ptr [[X:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-INTERLEAVED-NEXT:  entry:
; CHECK-INTERLEAVED-NEXT:    br label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVED:       vector.ph:
; CHECK-INTERLEAVED-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED:       vector.body:
; CHECK-INTERLEAVED-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ <i32 poison, i32 poison, i32 poison, i32 0>, [[VECTOR_PH]] ], [ [[STRIDED_VEC3:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT:    [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT:    [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE10:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT:    [[VEC_PHI2:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE11:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT:    [[VEC_PHI3:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE12:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 4
; CHECK-INTERLEAVED-NEXT:    [[TMP16:%.*]] = add i64 [[INDEX]], 8
; CHECK-INTERLEAVED-NEXT:    [[TMP17:%.*]] = add i64 [[INDEX]], 12
; CHECK-INTERLEAVED-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
; CHECK-INTERLEAVED-NEXT:    [[TMP2:%.*]] = add i64 [[TMP0]], 1
; CHECK-INTERLEAVED-NEXT:    [[TMP23:%.*]] = add i64 [[TMP16]], 1
; CHECK-INTERLEAVED-NEXT:    [[TMP6:%.*]] = add i64 [[TMP17]], 1
; CHECK-INTERLEAVED-NEXT:    [[TMP3:%.*]] = getelementptr [2 x i32], ptr [[X]], i64 [[TMP1]]
; CHECK-INTERLEAVED-NEXT:    [[TMP4:%.*]] = getelementptr [2 x i32], ptr [[X]], i64 [[TMP2]]
; CHECK-INTERLEAVED-NEXT:    [[TMP24:%.*]] = getelementptr [2 x i32], ptr [[X]], i64 [[TMP23]]
; CHECK-INTERLEAVED-NEXT:    [[TMP25:%.*]] = getelementptr [2 x i32], ptr [[X]], i64 [[TMP6]]
; CHECK-INTERLEAVED-NEXT:    [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4
; CHECK-INTERLEAVED-NEXT:    [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-INTERLEAVED-NEXT:    [[WIDE_VEC2:%.*]] = load <8 x i32>, ptr [[TMP4]], align 4
; CHECK-INTERLEAVED-NEXT:    [[STRIDED_VEC5:%.*]] = shufflevector <8 x i32> [[WIDE_VEC2]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-INTERLEAVED-NEXT:    [[WIDE_VEC6:%.*]] = load <8 x i32>, ptr [[TMP24]], align 4
; CHECK-INTERLEAVED-NEXT:    [[STRIDED_VEC7:%.*]] = shufflevector <8 x i32> [[WIDE_VEC6]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-INTERLEAVED-NEXT:    [[WIDE_VEC8:%.*]] = load <8 x i32>, ptr [[TMP25]], align 4
; CHECK-INTERLEAVED-NEXT:    [[STRIDED_VEC3]] = shufflevector <8 x i32> [[WIDE_VEC8]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-INTERLEAVED-NEXT:    [[TMP5:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[STRIDED_VEC]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-INTERLEAVED-NEXT:    [[TMP31:%.*]] = shufflevector <4 x i32> [[STRIDED_VEC]], <4 x i32> [[STRIDED_VEC5]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-INTERLEAVED-NEXT:    [[TMP13:%.*]] = shufflevector <4 x i32> [[STRIDED_VEC5]], <4 x i32> [[STRIDED_VEC7]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-INTERLEAVED-NEXT:    [[TMP14:%.*]] = shufflevector <4 x i32> [[STRIDED_VEC7]], <4 x i32> [[STRIDED_VEC3]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-INTERLEAVED-NEXT:    [[TMP7:%.*]] = sext <4 x i32> [[TMP5]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT:    [[TMP9:%.*]] = sext <4 x i32> [[STRIDED_VEC]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT:    [[TMP11:%.*]] = mul <4 x i64> [[TMP7]], [[TMP9]]
; CHECK-INTERLEAVED-NEXT:    [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v4i64(<2 x i64> [[VEC_PHI]], <4 x i64> [[TMP11]])
; CHECK-INTERLEAVED-NEXT:    [[TMP19:%.*]] = sext <4 x i32> [[TMP31]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT:    [[TMP20:%.*]] = sext <4 x i32> [[STRIDED_VEC5]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT:    [[TMP21:%.*]] = mul <4 x i64> [[TMP19]], [[TMP20]]
; CHECK-INTERLEAVED-NEXT:    [[PARTIAL_REDUCE10]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v4i64(<2 x i64> [[VEC_PHI1]], <4 x i64> [[TMP21]])
; CHECK-INTERLEAVED-NEXT:    [[TMP8:%.*]] = sext <4 x i32> [[TMP13]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT:    [[TMP10:%.*]] = sext <4 x i32> [[STRIDED_VEC7]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT:    [[TMP12:%.*]] = mul <4 x i64> [[TMP8]], [[TMP10]]
; CHECK-INTERLEAVED-NEXT:    [[PARTIAL_REDUCE11]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v4i64(<2 x i64> [[VEC_PHI2]], <4 x i64> [[TMP12]])
; CHECK-INTERLEAVED-NEXT:    [[TMP27:%.*]] = sext <4 x i32> [[TMP14]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT:    [[TMP28:%.*]] = sext <4 x i32> [[STRIDED_VEC3]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT:    [[TMP29:%.*]] = mul <4 x i64> [[TMP27]], [[TMP28]]
; CHECK-INTERLEAVED-NEXT:    [[PARTIAL_REDUCE12]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v4i64(<2 x i64> [[VEC_PHI3]], <4 x i64> [[TMP29]])
; CHECK-INTERLEAVED-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVED-NEXT:    [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32
; CHECK-INTERLEAVED-NEXT:    br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-INTERLEAVED:       middle.block:
; CHECK-INTERLEAVED-NEXT:    [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE10]], [[PARTIAL_REDUCE]]
; CHECK-INTERLEAVED-NEXT:    [[BIN_RDX13:%.*]] = add <2 x i64> [[PARTIAL_REDUCE11]], [[BIN_RDX]]
; CHECK-INTERLEAVED-NEXT:    [[BIN_RDX14:%.*]] = add <2 x i64> [[PARTIAL_REDUCE12]], [[BIN_RDX13]]
; CHECK-INTERLEAVED-NEXT:    [[TMP32:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX14]])
; CHECK-INTERLEAVED-NEXT:    [[TMP30:%.*]] = sub i64 0, [[TMP32]]
; CHECK-INTERLEAVED-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[STRIDED_VEC3]], i32 3
; CHECK-INTERLEAVED-NEXT:    br label [[SCALAR_PH:%.*]]
; CHECK-INTERLEAVED:       scalar.ph:
;
; CHECK-MAXBW-LABEL: define i64 @partial_reduce_sub_sext_mul(
; CHECK-MAXBW-SAME: ptr [[X:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-MAXBW-NEXT:  entry:
; CHECK-MAXBW-NEXT:    br label [[VECTOR_PH:%.*]]
; CHECK-MAXBW:       vector.ph:
; CHECK-MAXBW-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW:       vector.body:
; CHECK-MAXBW-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ <i32 poison, i32 poison, i32 poison, i32 0>, [[VECTOR_PH]] ], [ [[STRIDED_VEC:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT:    [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 1
; CHECK-MAXBW-NEXT:    [[TMP1:%.*]] = getelementptr [2 x i32], ptr [[X]], i64 [[TMP0]]
; CHECK-MAXBW-NEXT:    [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4
; CHECK-MAXBW-NEXT:    [[STRIDED_VEC]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-MAXBW-NEXT:    [[TMP2:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[STRIDED_VEC]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-MAXBW-NEXT:    [[TMP3:%.*]] = sext <4 x i32> [[TMP2]] to <4 x i64>
; CHECK-MAXBW-NEXT:    [[TMP4:%.*]] = sext <4 x i32> [[STRIDED_VEC]] to <4 x i64>
; CHECK-MAXBW-NEXT:    [[TMP5:%.*]] = mul <4 x i64> [[TMP3]], [[TMP4]]
; CHECK-MAXBW-NEXT:    [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v4i64(<2 x i64> [[VEC_PHI]], <4 x i64> [[TMP5]])
; CHECK-MAXBW-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-MAXBW-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 36
; CHECK-MAXBW-NEXT:    br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-MAXBW:       middle.block:
; CHECK-MAXBW-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT:    [[TMP9:%.*]] = sub i64 0, [[TMP8]]
; CHECK-MAXBW-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 3
; CHECK-MAXBW-NEXT:    br label [[SCALAR_PH:%.*]]
; CHECK-MAXBW:       scalar.ph:
;
entry:
  br label %loop

loop:                                             ; preds = %loop, %entry
  %prev = phi i32 [ 0, %entry ], [ %load, %loop ]
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %accum = phi i64 [ 0, %entry ], [ %sub, %loop ]
  %iv.next = add i64 %iv, 1
  %gep = getelementptr [2 x i32], ptr %x, i64 %iv.next
  %load = load i32, ptr %gep, align 4
  %ext.prev = sext i32 %prev to i64
  %ext.load = sext i32 %load to i64
  %mul = mul i64 %ext.prev, %ext.load
  %sub = sub i64 %accum, %mul
  %exitcond.not = icmp eq i64 %iv, 38
  br i1 %exitcond.not, label %exit, label %loop

exit:                                             ; preds = %loop
  ret i64 %sub
}

attributes #0 = { vscale_range(1,16) "target-features"="+sve" }
attributes #1 = { "target-cpu"="apple-m1" }