| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph\:" --version 5 |
| ; RUN: opt -passes=loop-vectorize -scalable-vectorization=off -force-vector-width=4 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s |
| |
| ; NOTE: These tests aren't really target-specific, but it's convenient to target AArch64 |
| ; so that TTI.isLegalMaskedLoad can return true. |
| |
| target triple = "aarch64-linux-gnu" |
| |
| ; The original loop had an unconditional uniform load. Let's make sure |
| ; we don't artificially create new predicated blocks for the load. |
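| ;
| ; For reference, a rough C equivalent of the scalar loop below (a hedged
| ; sketch, not the actual source; it assumes n > 0, since the IR loop is
| ; bottom-tested):
| ;
| ;   void uniform_load(int *restrict dst, const int *restrict src, long n) {
| ;     for (long i = 0; i < n; i++)
| ;       dst[i] = *src; // unconditional, loop-invariant (uniform) load
| ;   }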
| define void @uniform_load(ptr noalias %dst, ptr noalias readonly %src, i64 %n) #0 { |
| ; CHECK-LABEL: define void @uniform_load( |
| ; CHECK-SAME: ptr noalias [[DST:%.*]], ptr noalias readonly [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { |
| ; CHECK-NEXT: [[ENTRY:.*:]] |
| ; CHECK-NEXT: br label %[[VECTOR_PH:.*]] |
| ; CHECK: [[VECTOR_PH]]: |
| ; CHECK-NEXT: [[N_MINUS_VF:%.*]] = sub i64 [[N]], 4 |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[N]], 4 |
| ; CHECK-NEXT: [[N2:%.*]] = select i1 [[CMP]], i64 [[N_MINUS_VF]], i64 0 |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 0, i64 [[N]]) |
| ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] |
| ; CHECK: [[VECTOR_BODY]]: |
| ; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[NEXT_ACTIVE_LANE_MASK:%.*]], %[[VECTOR_BODY]] ] |
| ; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, ptr [[SRC]], align 4 |
| ; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[LOAD_VAL]], i64 0 |
| ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> poison, <4 x i32> zeroinitializer |
| ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IDX]] |
| ; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[TMP5]], ptr align 4 [[TMP6]], <4 x i1> [[ACTIVE_LANE_MASK]]) |
| ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[IDX]], 4 |
| ; CHECK-NEXT: [[NEXT_ACTIVE_LANE_MASK]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[IDX]], i64 [[N2]]) |
| ; CHECK-NEXT: [[EXTRACT_FIRST_LANE_MASK:%.*]] = extractelement <4 x i1> [[NEXT_ACTIVE_LANE_MASK]], i32 0 |
| ; CHECK-NEXT: [[FIRST_LANE_CLEAR:%.*]] = xor i1 [[EXTRACT_FIRST_LANE_MASK]], true
| ; CHECK-NEXT: br i1 [[FIRST_LANE_CLEAR]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
| ; CHECK: [[MIDDLE_BLOCK]]: |
| ; CHECK-NEXT: br label %[[FOR_END:.*]] |
| ; CHECK: [[FOR_END]]: |
| ; CHECK-NEXT: ret void |
| ; |
| |
| entry: |
| br label %for.body |
| |
| for.body: ; preds = %entry, %for.body |
| %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] |
| %val = load i32, ptr %src, align 4 |
| %arrayidx = getelementptr inbounds i32, ptr %dst, i64 %indvars.iv |
| store i32 %val, ptr %arrayidx, align 4 |
| %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 |
| %exitcond.not = icmp eq i64 %indvars.iv.next, %n |
| br i1 %exitcond.not, label %for.end, label %for.body |
| |
| for.end: ; preds = %for.body
| ret void |
| } |
| |
| ; The original loop had a conditional uniform load. In this case the load
| ; genuinely has to be performed conditionally, so we end up with a masked
| ; gather instead. However, we at least ensure the gather's mask is the
| ; logical AND of the loop predicate (the active lane mask) and the
| ; original condition.
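| ;
| ; For reference, a rough C equivalent (a hedged sketch, not the actual
| ; source; it assumes n > 0, since the IR loop is bottom-tested):
| ;
| ;   void cond_uniform_load(int *restrict dst, const int *src,
| ;                          const int *cond, long n) {
| ;     for (long i = 0; i < n; i++)
| ;       dst[i] = cond[i] ? *src : 0; // uniform load behind a condition
| ;   }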
| define void @cond_uniform_load(ptr noalias nocapture %dst, ptr nocapture readonly %src, ptr nocapture readonly %cond, i64 %n) #0 { |
| ; CHECK-LABEL: define void @cond_uniform_load( |
| ; CHECK-SAME: ptr noalias captures(none) [[DST:%.*]], ptr readonly captures(none) [[SRC:%.*]], ptr readonly captures(none) [[COND:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { |
| ; CHECK-NEXT: [[ENTRY:.*:]] |
| ; CHECK-NEXT: br label %[[VECTOR_PH:.*]] |
| ; CHECK: [[VECTOR_PH]]: |
| ; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[N]], 4 |
| ; CHECK-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[N]], 4 |
| ; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i64 [[TMP5]], i64 0 |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 0, i64 [[N]]) |
| ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x ptr> poison, ptr [[SRC]], i64 0 |
| ; CHECK-NEXT: [[SRC_SPLAT:%.*]] = shufflevector <4 x ptr> [[TMP1]], <4 x ptr> poison, <4 x i32> zeroinitializer |
| ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] |
| ; CHECK: [[VECTOR_BODY]]: |
| ; CHECK-NEXT: [[INDEX6:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT2:%.*]], %[[VECTOR_BODY]] ] |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] |
| ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[INDEX6]] |
| ; CHECK-NEXT: [[COND_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP6]], <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison) |
| ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[COND_LOAD]], zeroinitializer |
| ; CHECK-NEXT: [[MASK:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i1> [[TMP4]], <4 x i1> zeroinitializer |
| ; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> align 4 [[SRC_SPLAT]], <4 x i1> [[MASK]], <4 x i32> poison) |
| ; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP4]], <4 x i32> [[WIDE_MASKED_GATHER]], <4 x i32> zeroinitializer |
| ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[INDEX6]] |
| ; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[PREDPHI]], ptr align 4 [[TMP7]], <4 x i1> [[ACTIVE_LANE_MASK]]) |
| ; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX6]], 4 |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX6]], i64 [[TMP3]]) |
| ; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 |
| ; CHECK-NEXT: [[TMP9:%.*]] = xor i1 [[TMP8]], true |
| ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; CHECK: [[MIDDLE_BLOCK]]: |
| ; CHECK-NEXT: br label %[[FOR_END:.*]] |
| ; CHECK: [[FOR_END]]: |
| ; CHECK-NEXT: ret void |
| ; |
| entry: |
| br label %for.body |
| |
| for.body: ; preds = %entry, %if.end |
| %index = phi i64 [ %index.next, %if.end ], [ 0, %entry ] |
| %arrayidx = getelementptr inbounds i32, ptr %cond, i64 %index |
| %0 = load i32, ptr %arrayidx, align 4 |
| %tobool.not = icmp eq i32 %0, 0 |
| br i1 %tobool.not, label %if.end, label %if.then |
| |
| if.then: ; preds = %for.body |
| %1 = load i32, ptr %src, align 4 |
| br label %if.end |
| |
| if.end: ; preds = %if.then, %for.body |
| %val.0 = phi i32 [ %1, %if.then ], [ 0, %for.body ] |
| %arrayidx1 = getelementptr inbounds i32, ptr %dst, i64 %index |
| store i32 %val.0, ptr %arrayidx1, align 4 |
| %index.next = add nuw i64 %index, 1 |
| %exitcond.not = icmp eq i64 %index.next, %n |
| br i1 %exitcond.not, label %for.end, label %for.body |
| |
| for.end: ; preds = %if.end
| ret void |
| } |
| |
| attributes #0 = { "target-features"="+neon,+sve,+v8.1a" vscale_range(2, 0) } |