; blob: 2dceb27165c4d4794d66b631c2c05dd83c1f1e10 (web-viewer residue, kept as a comment so the file stays valid LLVM IR)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes="default<O3>" -mcpu=neoverse-v2 -S < %s | FileCheck %s
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
target triple = "aarch64"
; C equivalent (fast-math float):
;   for (i = 0; i < 1152; i += 2)
;     for (j = 0; j < 2; ++j)
;       a[i+j] += c[i+j] * b[i+j];
; Unoptimized-style input: every variable round-trips through an alloca.
; The autogenerated CHECK block records the -O3 result: the loop nest is
; collapsed and vectorized into <8 x float> operations, 576 iterations
; stepping the index by 8 (shl 1 stride, covering i = 0..1150).
define void @same_op2(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK-LABEL: define void @same_op2(
; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP0:%.*]] = or disjoint i64 [[OFFSET_IDX]], 8
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[TMP0]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x float>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[WIDE_VEC15:%.*]] = load <8 x float>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP0]]
; CHECK-NEXT: [[WIDE_VEC18:%.*]] = load <8 x float>, ptr [[TMP3]], align 4
; CHECK-NEXT: [[WIDE_VEC21:%.*]] = load <8 x float>, ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP0]]
; CHECK-NEXT: [[WIDE_VEC24:%.*]] = load <8 x float>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[WIDE_VEC27:%.*]] = load <8 x float>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = fmul fast <8 x float> [[WIDE_VEC18]], [[WIDE_VEC]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = fadd fast <8 x float> [[WIDE_VEC24]], [[TMP7]]
; CHECK-NEXT: store <8 x float> [[INTERLEAVED_VEC]], ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = fmul fast <8 x float> [[WIDE_VEC21]], [[WIDE_VEC15]]
; CHECK-NEXT: [[INTERLEAVED_VEC30:%.*]] = fadd fast <8 x float> [[WIDE_VEC27]], [[TMP8]]
; CHECK-NEXT: store <8 x float> [[INTERLEAVED_VEC30]], ptr [[TMP6]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 576
; CHECK-NEXT: br i1 [[TMP9]], label %[[FOR_END13:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[FOR_END13]]:
; CHECK-NEXT: ret void
;
entry:
; Stack slots for the three pointer arguments plus N, i and j.
%a.addr = alloca ptr, align 8
%b.addr = alloca ptr, align 8
%c.addr = alloca ptr, align 8
%N = alloca i32, align 4
%i = alloca i32, align 4
%j = alloca i32, align 4
store ptr %a, ptr %a.addr, align 8
store ptr %b, ptr %b.addr, align 8
store ptr %c, ptr %c.addr, align 8
store i32 2, ptr %N, align 4 ; %N is written but never read
store i32 0, ptr %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc11, %entry
; Outer-loop header: continue while i < 1152.
%0 = load i32, ptr %i, align 4
%cmp = icmp slt i32 %0, 1152
br i1 %cmp, label %for.body, label %for.end13
for.body: ; preds = %for.cond
; Reset inner counter: j = 0.
store i32 0, ptr %j, align 4
br label %for.cond1
for.cond1: ; preds = %for.inc, %for.body
; Inner-loop header: continue while j < 2.
%1 = load i32, ptr %j, align 4
%cmp2 = icmp slt i32 %1, 2
br i1 %cmp2, label %for.body3, label %for.end
for.body3: ; preds = %for.cond1
; a[i+j] += c[i+j] * b[i+j], scalar float with fast-math flags.
%2 = load ptr, ptr %c.addr, align 8
%3 = load i32, ptr %i, align 4
%4 = load i32, ptr %j, align 4
%add = add nsw i32 %3, %4
%idxprom = sext i32 %add to i64
%arrayidx = getelementptr inbounds float, ptr %2, i64 %idxprom
%5 = load float, ptr %arrayidx, align 4
%6 = load ptr, ptr %b.addr, align 8
%7 = load i32, ptr %i, align 4
%8 = load i32, ptr %j, align 4
%add4 = add nsw i32 %7, %8
%idxprom5 = sext i32 %add4 to i64
%arrayidx6 = getelementptr inbounds float, ptr %6, i64 %idxprom5
%9 = load float, ptr %arrayidx6, align 4
%mul = fmul fast float %5, %9
%10 = load ptr, ptr %a.addr, align 8
%11 = load i32, ptr %i, align 4
%12 = load i32, ptr %j, align 4
%add7 = add nsw i32 %11, %12
%idxprom8 = sext i32 %add7 to i64
%arrayidx9 = getelementptr inbounds float, ptr %10, i64 %idxprom8
%13 = load float, ptr %arrayidx9, align 4
%add10 = fadd fast float %13, %mul
store float %add10, ptr %arrayidx9, align 4
br label %for.inc
for.inc: ; preds = %for.body3
; ++j
%14 = load i32, ptr %j, align 4
%inc = add nsw i32 %14, 1
store i32 %inc, ptr %j, align 4
br label %for.cond1
for.end: ; preds = %for.cond1
br label %for.inc11
for.inc11: ; preds = %for.end
; i += 2 (outer stride matches the inner trip count, so accesses are contiguous)
%15 = load i32, ptr %i, align 4
%add12 = add nsw i32 %15, 2
store i32 %add12, ptr %i, align 4
br label %for.cond
for.end13: ; preds = %for.cond
ret void
}
; Same loop nest as @same_op2, but the multiplier is the loop-invariant
; scalar c[0]:
;   for (i = 0; i < 1152; i += 2)
;     for (j = 0; j < 2; ++j)
;       a[i+j] += c[0] * b[i+j];
; The CHECK lines show -O3 hoisting the c[0] load into the entry block and
; broadcasting (splatting) it into <8 x float> for the vector loop.
define void @same_op2_splat(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK-LABEL: define void @same_op2_splat(
; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[C]], align 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i64 [[OFFSET_IDX]], 8
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP3]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x float>, ptr [[TMP4]], align 4
; CHECK-NEXT: [[WIDE_VEC13:%.*]] = load <8 x float>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP3]]
; CHECK-NEXT: [[WIDE_VEC16:%.*]] = load <8 x float>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[WIDE_VEC19:%.*]] = load <8 x float>, ptr [[TMP7]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = fmul fast <8 x float> [[WIDE_VEC]], [[TMP1]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = fadd fast <8 x float> [[WIDE_VEC16]], [[TMP8]]
; CHECK-NEXT: store <8 x float> [[INTERLEAVED_VEC]], ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = fmul fast <8 x float> [[WIDE_VEC13]], [[TMP2]]
; CHECK-NEXT: [[INTERLEAVED_VEC22:%.*]] = fadd fast <8 x float> [[WIDE_VEC19]], [[TMP9]]
; CHECK-NEXT: store <8 x float> [[INTERLEAVED_VEC22]], ptr [[TMP7]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 576
; CHECK-NEXT: br i1 [[TMP10]], label %[[FOR_END11:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[FOR_END11]]:
; CHECK-NEXT: ret void
;
entry:
; Stack slots for the three pointer arguments plus N, i and j.
%a.addr = alloca ptr, align 8
%b.addr = alloca ptr, align 8
%c.addr = alloca ptr, align 8
%N = alloca i32, align 4
%i = alloca i32, align 4
%j = alloca i32, align 4
store ptr %a, ptr %a.addr, align 8
store ptr %b, ptr %b.addr, align 8
store ptr %c, ptr %c.addr, align 8
store i32 2, ptr %N, align 4 ; %N is written but never read
store i32 0, ptr %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc9, %entry
; Outer-loop header: continue while i < 1152.
%0 = load i32, ptr %i, align 4
%cmp = icmp slt i32 %0, 1152
br i1 %cmp, label %for.body, label %for.end11
for.body: ; preds = %for.cond
; Reset inner counter: j = 0.
store i32 0, ptr %j, align 4
br label %for.cond1
for.cond1: ; preds = %for.inc, %for.body
; Inner-loop header: continue while j < 2.
%1 = load i32, ptr %j, align 4
%cmp2 = icmp slt i32 %1, 2
br i1 %cmp2, label %for.body3, label %for.end
for.body3: ; preds = %for.cond1
; a[i+j] += c[0] * b[i+j]; c[0] is re-loaded every iteration here but is
; loop-invariant, which enables the broadcast seen in the CHECK output.
%2 = load ptr, ptr %c.addr, align 8
%arrayidx = getelementptr inbounds float, ptr %2, i64 0
%3 = load float, ptr %arrayidx, align 4
%4 = load ptr, ptr %b.addr, align 8
%5 = load i32, ptr %i, align 4
%6 = load i32, ptr %j, align 4
%add = add nsw i32 %5, %6
%idxprom = sext i32 %add to i64
%arrayidx4 = getelementptr inbounds float, ptr %4, i64 %idxprom
%7 = load float, ptr %arrayidx4, align 4
%mul = fmul fast float %3, %7
%8 = load ptr, ptr %a.addr, align 8
%9 = load i32, ptr %i, align 4
%10 = load i32, ptr %j, align 4
%add5 = add nsw i32 %9, %10
%idxprom6 = sext i32 %add5 to i64
%arrayidx7 = getelementptr inbounds float, ptr %8, i64 %idxprom6
%11 = load float, ptr %arrayidx7, align 4
%add8 = fadd fast float %11, %mul
store float %add8, ptr %arrayidx7, align 4
br label %for.inc
for.inc: ; preds = %for.body3
; ++j
%12 = load i32, ptr %j, align 4
%inc = add nsw i32 %12, 1
store i32 %inc, ptr %j, align 4
br label %for.cond1
for.end: ; preds = %for.cond1
br label %for.inc9
for.inc9: ; preds = %for.end
; i += 2
%13 = load i32, ptr %i, align 4
%add10 = add nsw i32 %13, 2
store i32 %add10, ptr %i, align 4
br label %for.cond
for.end11: ; preds = %for.cond
ret void
}
; C equivalent (fast-math float):
;   for (i = 0; i < 1152; i += 3)
;     for (j = 0; j < 3; ++j)
;       a[i+j] += c[i+j] * b[i+j];
; The CHECK lines show the -O3 result: the nest collapses into one vector
; loop over <12 x float> (factor-3 stride, 384 iterations of 4 groups).
define void @same_op3(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK-LABEL: define void @same_op3(
; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 3
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <12 x float>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC16:%.*]] = load <12 x float>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC20:%.*]] = load <12 x float>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <12 x float> [[WIDE_VEC16]], [[WIDE_VEC]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = fadd fast <12 x float> [[WIDE_VEC20]], [[TMP3]]
; CHECK-NEXT: store <12 x float> [[INTERLEAVED_VEC]], ptr [[TMP2]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 384
; CHECK-NEXT: br i1 [[TMP4]], label %[[FOR_END13:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[FOR_END13]]:
; CHECK-NEXT: ret void
;
entry:
; Stack slots for the three pointer arguments plus N, i and j.
%a.addr = alloca ptr, align 8
%b.addr = alloca ptr, align 8
%c.addr = alloca ptr, align 8
%N = alloca i32, align 4
%i = alloca i32, align 4
%j = alloca i32, align 4
store ptr %a, ptr %a.addr, align 8
store ptr %b, ptr %b.addr, align 8
store ptr %c, ptr %c.addr, align 8
store i32 3, ptr %N, align 4 ; %N is written but never read
store i32 0, ptr %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc11, %entry
; Outer-loop header: continue while i < 1152.
%0 = load i32, ptr %i, align 4
%cmp = icmp slt i32 %0, 1152
br i1 %cmp, label %for.body, label %for.end13
for.body: ; preds = %for.cond
; Reset inner counter: j = 0.
store i32 0, ptr %j, align 4
br label %for.cond1
for.cond1: ; preds = %for.inc, %for.body
; Inner-loop header: continue while j < 3.
%1 = load i32, ptr %j, align 4
%cmp2 = icmp slt i32 %1, 3
br i1 %cmp2, label %for.body3, label %for.end
for.body3: ; preds = %for.cond1
; a[i+j] += c[i+j] * b[i+j], scalar float with fast-math flags.
%2 = load ptr, ptr %c.addr, align 8
%3 = load i32, ptr %i, align 4
%4 = load i32, ptr %j, align 4
%add = add nsw i32 %3, %4
%idxprom = sext i32 %add to i64
%arrayidx = getelementptr inbounds float, ptr %2, i64 %idxprom
%5 = load float, ptr %arrayidx, align 4
%6 = load ptr, ptr %b.addr, align 8
%7 = load i32, ptr %i, align 4
%8 = load i32, ptr %j, align 4
%add4 = add nsw i32 %7, %8
%idxprom5 = sext i32 %add4 to i64
%arrayidx6 = getelementptr inbounds float, ptr %6, i64 %idxprom5
%9 = load float, ptr %arrayidx6, align 4
%mul = fmul fast float %5, %9
%10 = load ptr, ptr %a.addr, align 8
%11 = load i32, ptr %i, align 4
%12 = load i32, ptr %j, align 4
%add7 = add nsw i32 %11, %12
%idxprom8 = sext i32 %add7 to i64
%arrayidx9 = getelementptr inbounds float, ptr %10, i64 %idxprom8
%13 = load float, ptr %arrayidx9, align 4
%add10 = fadd fast float %13, %mul
store float %add10, ptr %arrayidx9, align 4
br label %for.inc
for.inc: ; preds = %for.body3
; ++j
%14 = load i32, ptr %j, align 4
%inc = add nsw i32 %14, 1
store i32 %inc, ptr %j, align 4
br label %for.cond1
for.end: ; preds = %for.cond1
br label %for.inc11
for.inc11: ; preds = %for.end
; i += 3 (outer stride matches the inner trip count, so accesses are contiguous)
%15 = load i32, ptr %i, align 4
%add12 = add nsw i32 %15, 3
store i32 %add12, ptr %i, align 4
br label %for.cond
for.end13: ; preds = %for.cond
ret void
}
; Same loop nest as @same_op3, but the multiplier is the loop-invariant
; scalar c[0]:
;   for (i = 0; i < 1152; i += 3)
;     for (j = 0; j < 3; ++j)
;       a[i+j] += c[0] * b[i+j];
; The CHECK lines show -O3 hoisting the c[0] load and splatting it into
; <12 x float> for the collapsed vector loop.
define void @same_op3_splat(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK-LABEL: define void @same_op3_splat(
; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[C]], align 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <12 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 3
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <12 x float>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC14:%.*]] = load <12 x float>, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = fmul fast <12 x float> [[WIDE_VEC]], [[TMP2]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = fadd fast <12 x float> [[WIDE_VEC14]], [[TMP4]]
; CHECK-NEXT: store <12 x float> [[INTERLEAVED_VEC]], ptr [[TMP3]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 384
; CHECK-NEXT: br i1 [[TMP12]], label %[[FOR_END11:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[FOR_END11]]:
; CHECK-NEXT: ret void
;
entry:
; Stack slots for the three pointer arguments plus N, i and j.
%a.addr = alloca ptr, align 8
%b.addr = alloca ptr, align 8
%c.addr = alloca ptr, align 8
%N = alloca i32, align 4
%i = alloca i32, align 4
%j = alloca i32, align 4
store ptr %a, ptr %a.addr, align 8
store ptr %b, ptr %b.addr, align 8
store ptr %c, ptr %c.addr, align 8
store i32 3, ptr %N, align 4 ; %N is written but never read
store i32 0, ptr %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc9, %entry
; Outer-loop header: continue while i < 1152.
%0 = load i32, ptr %i, align 4
%cmp = icmp slt i32 %0, 1152
br i1 %cmp, label %for.body, label %for.end11
for.body: ; preds = %for.cond
; Reset inner counter: j = 0.
store i32 0, ptr %j, align 4
br label %for.cond1
for.cond1: ; preds = %for.inc, %for.body
; Inner-loop header: continue while j < 3.
%1 = load i32, ptr %j, align 4
%cmp2 = icmp slt i32 %1, 3
br i1 %cmp2, label %for.body3, label %for.end
for.body3: ; preds = %for.cond1
; a[i+j] += c[0] * b[i+j]; the loop-invariant c[0] load enables the splat.
%2 = load ptr, ptr %c.addr, align 8
%arrayidx = getelementptr inbounds float, ptr %2, i64 0
%3 = load float, ptr %arrayidx, align 4
%4 = load ptr, ptr %b.addr, align 8
%5 = load i32, ptr %i, align 4
%6 = load i32, ptr %j, align 4
%add = add nsw i32 %5, %6
%idxprom = sext i32 %add to i64
%arrayidx4 = getelementptr inbounds float, ptr %4, i64 %idxprom
%7 = load float, ptr %arrayidx4, align 4
%mul = fmul fast float %3, %7
%8 = load ptr, ptr %a.addr, align 8
%9 = load i32, ptr %i, align 4
%10 = load i32, ptr %j, align 4
%add5 = add nsw i32 %9, %10
%idxprom6 = sext i32 %add5 to i64
%arrayidx7 = getelementptr inbounds float, ptr %8, i64 %idxprom6
%11 = load float, ptr %arrayidx7, align 4
%add8 = fadd fast float %11, %mul
store float %add8, ptr %arrayidx7, align 4
br label %for.inc
for.inc: ; preds = %for.body3
; ++j
%12 = load i32, ptr %j, align 4
%inc = add nsw i32 %12, 1
store i32 %inc, ptr %j, align 4
br label %for.cond1
for.end: ; preds = %for.cond1
br label %for.inc9
for.inc9: ; preds = %for.end
; i += 3
%13 = load i32, ptr %i, align 4
%add10 = add nsw i32 %13, 3
store i32 %add10, ptr %i, align 4
br label %for.cond
for.end11: ; preds = %for.cond
ret void
}
; C equivalent (fast-math float):
;   for (i = 0; i < 1152; i += 4)
;     for (j = 0; j < 4; ++j)
;       a[i+j] += c[i+j] * b[i+j];
; The CHECK lines show the -O3 result: one vector loop over <16 x float>
; (shl 2 stride, 288 iterations of 4 groups).
define void @same_op4(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK-LABEL: define void @same_op4(
; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x float>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC17:%.*]] = load <16 x float>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC22:%.*]] = load <16 x float>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <16 x float> [[WIDE_VEC17]], [[WIDE_VEC]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = fadd fast <16 x float> [[WIDE_VEC22]], [[TMP3]]
; CHECK-NEXT: store <16 x float> [[INTERLEAVED_VEC]], ptr [[TMP2]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 288
; CHECK-NEXT: br i1 [[TMP4]], label %[[FOR_END13:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[FOR_END13]]:
; CHECK-NEXT: ret void
;
entry:
; Stack slots for the three pointer arguments plus N, i and j.
%a.addr = alloca ptr, align 8
%b.addr = alloca ptr, align 8
%c.addr = alloca ptr, align 8
%N = alloca i32, align 4
%i = alloca i32, align 4
%j = alloca i32, align 4
store ptr %a, ptr %a.addr, align 8
store ptr %b, ptr %b.addr, align 8
store ptr %c, ptr %c.addr, align 8
store i32 4, ptr %N, align 4 ; %N is written but never read
store i32 0, ptr %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc11, %entry
; Outer-loop header: continue while i < 1152.
%0 = load i32, ptr %i, align 4
%cmp = icmp slt i32 %0, 1152
br i1 %cmp, label %for.body, label %for.end13
for.body: ; preds = %for.cond
; Reset inner counter: j = 0.
store i32 0, ptr %j, align 4
br label %for.cond1
for.cond1: ; preds = %for.inc, %for.body
; Inner-loop header: continue while j < 4.
%1 = load i32, ptr %j, align 4
%cmp2 = icmp slt i32 %1, 4
br i1 %cmp2, label %for.body3, label %for.end
for.body3: ; preds = %for.cond1
; a[i+j] += c[i+j] * b[i+j], scalar float with fast-math flags.
%2 = load ptr, ptr %c.addr, align 8
%3 = load i32, ptr %i, align 4
%4 = load i32, ptr %j, align 4
%add = add nsw i32 %3, %4
%idxprom = sext i32 %add to i64
%arrayidx = getelementptr inbounds float, ptr %2, i64 %idxprom
%5 = load float, ptr %arrayidx, align 4
%6 = load ptr, ptr %b.addr, align 8
%7 = load i32, ptr %i, align 4
%8 = load i32, ptr %j, align 4
%add4 = add nsw i32 %7, %8
%idxprom5 = sext i32 %add4 to i64
%arrayidx6 = getelementptr inbounds float, ptr %6, i64 %idxprom5
%9 = load float, ptr %arrayidx6, align 4
%mul = fmul fast float %5, %9
%10 = load ptr, ptr %a.addr, align 8
%11 = load i32, ptr %i, align 4
%12 = load i32, ptr %j, align 4
%add7 = add nsw i32 %11, %12
%idxprom8 = sext i32 %add7 to i64
%arrayidx9 = getelementptr inbounds float, ptr %10, i64 %idxprom8
%13 = load float, ptr %arrayidx9, align 4
%add10 = fadd fast float %13, %mul
store float %add10, ptr %arrayidx9, align 4
br label %for.inc
for.inc: ; preds = %for.body3
; ++j
%14 = load i32, ptr %j, align 4
%inc = add nsw i32 %14, 1
store i32 %inc, ptr %j, align 4
br label %for.cond1
for.end: ; preds = %for.cond1
br label %for.inc11
for.inc11: ; preds = %for.end
; i += 4 (outer stride matches the inner trip count, so accesses are contiguous)
%15 = load i32, ptr %i, align 4
%add12 = add nsw i32 %15, 4
store i32 %add12, ptr %i, align 4
br label %for.cond
for.end13: ; preds = %for.cond
ret void
}
; Same loop nest as @same_op4, but the multiplier is the loop-invariant
; scalar c[0]:
;   for (i = 0; i < 1152; i += 4)
;     for (j = 0; j < 4; ++j)
;       a[i+j] += c[0] * b[i+j];
; The CHECK lines show -O3 hoisting the c[0] load and splatting it into
; <16 x float> for the collapsed vector loop.
define void @same_op4_splat(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK-LABEL: define void @same_op4_splat(
; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[C]], align 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <16 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x float>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC15:%.*]] = load <16 x float>, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = fmul fast <16 x float> [[WIDE_VEC]], [[TMP1]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = fadd fast <16 x float> [[WIDE_VEC15]], [[TMP4]]
; CHECK-NEXT: store <16 x float> [[INTERLEAVED_VEC]], ptr [[TMP3]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 288
; CHECK-NEXT: br i1 [[TMP5]], label %[[FOR_END11:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[FOR_END11]]:
; CHECK-NEXT: ret void
;
entry:
; Stack slots for the three pointer arguments plus N, i and j.
%a.addr = alloca ptr, align 8
%b.addr = alloca ptr, align 8
%c.addr = alloca ptr, align 8
%N = alloca i32, align 4
%i = alloca i32, align 4
%j = alloca i32, align 4
store ptr %a, ptr %a.addr, align 8
store ptr %b, ptr %b.addr, align 8
store ptr %c, ptr %c.addr, align 8
store i32 4, ptr %N, align 4 ; %N is written but never read
store i32 0, ptr %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc9, %entry
; Outer-loop header: continue while i < 1152.
%0 = load i32, ptr %i, align 4
%cmp = icmp slt i32 %0, 1152
br i1 %cmp, label %for.body, label %for.end11
for.body: ; preds = %for.cond
; Reset inner counter: j = 0.
store i32 0, ptr %j, align 4
br label %for.cond1
for.cond1: ; preds = %for.inc, %for.body
; Inner-loop header: continue while j < 4.
%1 = load i32, ptr %j, align 4
%cmp2 = icmp slt i32 %1, 4
br i1 %cmp2, label %for.body3, label %for.end
for.body3: ; preds = %for.cond1
; a[i+j] += c[0] * b[i+j]; the loop-invariant c[0] load enables the splat.
%2 = load ptr, ptr %c.addr, align 8
%arrayidx = getelementptr inbounds float, ptr %2, i64 0
%3 = load float, ptr %arrayidx, align 4
%4 = load ptr, ptr %b.addr, align 8
%5 = load i32, ptr %i, align 4
%6 = load i32, ptr %j, align 4
%add = add nsw i32 %5, %6
%idxprom = sext i32 %add to i64
%arrayidx4 = getelementptr inbounds float, ptr %4, i64 %idxprom
%7 = load float, ptr %arrayidx4, align 4
%mul = fmul fast float %3, %7
%8 = load ptr, ptr %a.addr, align 8
%9 = load i32, ptr %i, align 4
%10 = load i32, ptr %j, align 4
%add5 = add nsw i32 %9, %10
%idxprom6 = sext i32 %add5 to i64
%arrayidx7 = getelementptr inbounds float, ptr %8, i64 %idxprom6
%11 = load float, ptr %arrayidx7, align 4
%add8 = fadd fast float %11, %mul
store float %add8, ptr %arrayidx7, align 4
br label %for.inc
for.inc: ; preds = %for.body3
; ++j
%12 = load i32, ptr %j, align 4
%inc = add nsw i32 %12, 1
store i32 %inc, ptr %j, align 4
br label %for.cond1
for.end: ; preds = %for.cond1
br label %for.inc9
for.inc9: ; preds = %for.end
; i += 4
%13 = load i32, ptr %i, align 4
%add10 = add nsw i32 %13, 4
store i32 %add10, ptr %i, align 4
br label %for.cond
for.end11: ; preds = %for.cond
ret void
}
; C equivalent (fast-math float):
;   for (i = 0; i < 1152; i += 6)
;     for (j = 0; j < 6; ++j)
;       a[i+j] += c[i+j] * b[i+j];
; Unlike the 2/3/4 variants, the CHECK lines show no single wide vector
; loop here: the 6-iteration inner loop is fully unrolled and vectorized
; as a <4 x float> chunk plus a <2 x float> tail inside each outer
; iteration (outer induction steps by 6, exiting after i > 1146).
define void @same_op6(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK-LABEL: define void @same_op6(
; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br label %[[FOR_COND1_PREHEADER:.*]]
; CHECK: [[FOR_COND1_PREHEADER]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_COND1_PREHEADER]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = fmul fast <4 x float> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr [[ARRAYIDX9]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = fadd fast <4 x float> [[TMP3]], [[TMP2]]
; CHECK-NEXT: store <4 x float> [[TMP4]], ptr [[ARRAYIDX9]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 4
; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[TMP5]]
; CHECK-NEXT: [[ARRAYIDX6_4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP5]]
; CHECK-NEXT: [[ARRAYIDX9_4:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = load <2 x float>, ptr [[ARRAYIDX_4]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = load <2 x float>, ptr [[ARRAYIDX6_4]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = fmul fast <2 x float> [[TMP7]], [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = load <2 x float>, ptr [[ARRAYIDX9_4]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = fadd fast <2 x float> [[TMP9]], [[TMP8]]
; CHECK-NEXT: store <2 x float> [[TMP10]], ptr [[ARRAYIDX9_4]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 6
; CHECK-NEXT: [[CMP:%.*]] = icmp samesign ult i64 [[INDVARS_IV]], 1146
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_COND1_PREHEADER]], label %[[FOR_END13:.*]]
; CHECK: [[FOR_END13]]:
; CHECK-NEXT: ret void
;
entry:
; Stack slots for the three pointer arguments plus N, i and j.
%a.addr = alloca ptr, align 8
%b.addr = alloca ptr, align 8
%c.addr = alloca ptr, align 8
%N = alloca i32, align 4
%i = alloca i32, align 4
%j = alloca i32, align 4
store ptr %a, ptr %a.addr, align 8
store ptr %b, ptr %b.addr, align 8
store ptr %c, ptr %c.addr, align 8
store i32 6, ptr %N, align 4 ; %N is written but never read
store i32 0, ptr %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc11, %entry
; Outer-loop header: continue while i < 1152.
%0 = load i32, ptr %i, align 4
%cmp = icmp slt i32 %0, 1152
br i1 %cmp, label %for.body, label %for.end13
for.body: ; preds = %for.cond
; Reset inner counter: j = 0.
store i32 0, ptr %j, align 4
br label %for.cond1
for.cond1: ; preds = %for.inc, %for.body
; Inner-loop header: continue while j < 6.
%1 = load i32, ptr %j, align 4
%cmp2 = icmp slt i32 %1, 6
br i1 %cmp2, label %for.body3, label %for.end
for.body3: ; preds = %for.cond1
; a[i+j] += c[i+j] * b[i+j], scalar float with fast-math flags.
%2 = load ptr, ptr %c.addr, align 8
%3 = load i32, ptr %i, align 4
%4 = load i32, ptr %j, align 4
%add = add nsw i32 %3, %4
%idxprom = sext i32 %add to i64
%arrayidx = getelementptr inbounds float, ptr %2, i64 %idxprom
%5 = load float, ptr %arrayidx, align 4
%6 = load ptr, ptr %b.addr, align 8
%7 = load i32, ptr %i, align 4
%8 = load i32, ptr %j, align 4
%add4 = add nsw i32 %7, %8
%idxprom5 = sext i32 %add4 to i64
%arrayidx6 = getelementptr inbounds float, ptr %6, i64 %idxprom5
%9 = load float, ptr %arrayidx6, align 4
%mul = fmul fast float %5, %9
%10 = load ptr, ptr %a.addr, align 8
%11 = load i32, ptr %i, align 4
%12 = load i32, ptr %j, align 4
%add7 = add nsw i32 %11, %12
%idxprom8 = sext i32 %add7 to i64
%arrayidx9 = getelementptr inbounds float, ptr %10, i64 %idxprom8
%13 = load float, ptr %arrayidx9, align 4
%add10 = fadd fast float %13, %mul
store float %add10, ptr %arrayidx9, align 4
br label %for.inc
for.inc: ; preds = %for.body3
; ++j
%14 = load i32, ptr %j, align 4
%inc = add nsw i32 %14, 1
store i32 %inc, ptr %j, align 4
br label %for.cond1
for.end: ; preds = %for.cond1
br label %for.inc11
for.inc11: ; preds = %for.end
; i += 6 (outer stride matches the inner trip count, so accesses are contiguous)
%15 = load i32, ptr %i, align 4
%add12 = add nsw i32 %15, 6
store i32 %add12, ptr %i, align 4
br label %for.cond
for.end13: ; preds = %for.cond
ret void
}
; Same loop nest as @same_op6, but the multiplier is the loop-invariant
; scalar c[0]:
;   for (i = 0; i < 1152; i += 6)
;     for (j = 0; j < 6; ++j)
;       a[i+j] += c[0] * b[i+j];
; The CHECK lines show c[0] hoisted into the entry block and splatted
; twice (<4 x float> and <2 x float>), matching the 4+2 split of the
; unrolled 6-wide inner loop.
define void @same_op6_splat(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK-LABEL: define void @same_op6_splat(
; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[C]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x float> poison, float [[TMP0]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: br label %[[FOR_COND1_PREHEADER:.*]]
; CHECK: [[FOR_COND1_PREHEADER]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_COND1_PREHEADER]] ]
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = fmul fast <4 x float> [[TMP5]], [[TMP2]]
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr [[ARRAYIDX7]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = fadd fast <4 x float> [[TMP7]], [[TMP6]]
; CHECK-NEXT: store <4 x float> [[TMP8]], ptr [[ARRAYIDX7]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 4
; CHECK-NEXT: [[ARRAYIDX4_4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP9]]
; CHECK-NEXT: [[ARRAYIDX7_4:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP9]]
; CHECK-NEXT: [[TMP10:%.*]] = load <2 x float>, ptr [[ARRAYIDX4_4]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = fmul fast <2 x float> [[TMP10]], [[TMP4]]
; CHECK-NEXT: [[TMP12:%.*]] = load <2 x float>, ptr [[ARRAYIDX7_4]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = fadd fast <2 x float> [[TMP12]], [[TMP11]]
; CHECK-NEXT: store <2 x float> [[TMP13]], ptr [[ARRAYIDX7_4]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 6
; CHECK-NEXT: [[CMP:%.*]] = icmp samesign ult i64 [[INDVARS_IV]], 1146
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_COND1_PREHEADER]], label %[[FOR_END11:.*]]
; CHECK: [[FOR_END11]]:
; CHECK-NEXT: ret void
;
entry:
; Stack slots for the three pointer arguments plus N, i and j.
%a.addr = alloca ptr, align 8
%b.addr = alloca ptr, align 8
%c.addr = alloca ptr, align 8
%N = alloca i32, align 4
%i = alloca i32, align 4
%j = alloca i32, align 4
store ptr %a, ptr %a.addr, align 8
store ptr %b, ptr %b.addr, align 8
store ptr %c, ptr %c.addr, align 8
store i32 6, ptr %N, align 4 ; %N is written but never read
store i32 0, ptr %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc9, %entry
; Outer-loop header: continue while i < 1152.
%0 = load i32, ptr %i, align 4
%cmp = icmp slt i32 %0, 1152
br i1 %cmp, label %for.body, label %for.end11
for.body: ; preds = %for.cond
; Reset inner counter: j = 0.
store i32 0, ptr %j, align 4
br label %for.cond1
for.cond1: ; preds = %for.inc, %for.body
; Inner-loop header: continue while j < 6.
%1 = load i32, ptr %j, align 4
%cmp2 = icmp slt i32 %1, 6
br i1 %cmp2, label %for.body3, label %for.end
for.body3: ; preds = %for.cond1
; a[i+j] += c[0] * b[i+j]; the loop-invariant c[0] load enables the splat.
%2 = load ptr, ptr %c.addr, align 8
%arrayidx = getelementptr inbounds float, ptr %2, i64 0
%3 = load float, ptr %arrayidx, align 4
%4 = load ptr, ptr %b.addr, align 8
%5 = load i32, ptr %i, align 4
%6 = load i32, ptr %j, align 4
%add = add nsw i32 %5, %6
%idxprom = sext i32 %add to i64
%arrayidx4 = getelementptr inbounds float, ptr %4, i64 %idxprom
%7 = load float, ptr %arrayidx4, align 4
%mul = fmul fast float %3, %7
%8 = load ptr, ptr %a.addr, align 8
%9 = load i32, ptr %i, align 4
%10 = load i32, ptr %j, align 4
%add5 = add nsw i32 %9, %10
%idxprom6 = sext i32 %add5 to i64
%arrayidx7 = getelementptr inbounds float, ptr %8, i64 %idxprom6
%11 = load float, ptr %arrayidx7, align 4
%add8 = fadd fast float %11, %mul
store float %add8, ptr %arrayidx7, align 4
br label %for.inc
for.inc: ; preds = %for.body3
; ++j
%12 = load i32, ptr %j, align 4
%inc = add nsw i32 %12, 1
store i32 %inc, ptr %j, align 4
br label %for.cond1
for.end: ; preds = %for.cond1
br label %for.inc9
for.inc9: ; preds = %for.end
; i += 6
%13 = load i32, ptr %i, align 4
%add10 = add nsw i32 %13, 6
store i32 %add10, ptr %i, align 4
br label %for.cond
for.end11: ; preds = %for.cond
ret void
}
; a[i+j] += c[i+j] * b[i+j], with the outer counter i stepping by 8 up to
; 1152 and an inner loop of exactly 8 iterations. Kept in unoptimized
; alloca/load/store form so the -O3 pipeline itself performs mem2reg
; promotion, inner-loop unrolling and vectorization.
define void @same_op8(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK-LABEL: define void @same_op8(
; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br label %[[FOR_COND1_PREHEADER:.*]]
; CHECK: [[FOR_COND1_PREHEADER]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_COND1_PREHEADER]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[ARRAYIDX6]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = fmul fast <4 x float> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x float>, ptr [[ARRAYIDX9]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = fadd fast <4 x float> [[TMP3]], [[TMP2]]
; CHECK-NEXT: store <4 x float> [[TMP4]], ptr [[ARRAYIDX9]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = or disjoint i64 [[INDVARS_IV]], 4
; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[TMP5]]
; CHECK-NEXT: [[ARRAYIDX6_4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP5]]
; CHECK-NEXT: [[ARRAYIDX9_4:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x float>, ptr [[ARRAYIDX_4]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr [[ARRAYIDX6_4]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = fmul fast <4 x float> [[TMP7]], [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x float>, ptr [[ARRAYIDX9_4]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = fadd fast <4 x float> [[TMP9]], [[TMP8]]
; CHECK-NEXT: store <4 x float> [[TMP10]], ptr [[ARRAYIDX9_4]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 8
; CHECK-NEXT: [[CMP:%.*]] = icmp samesign ult i64 [[INDVARS_IV]], 1144
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_COND1_PREHEADER]], label %[[FOR_END13:.*]]
; CHECK: [[FOR_END13]]:
; CHECK-NEXT: ret void
;
entry:
; Stack slots for the pointer arguments and the loop counters; promoted
; out of memory by the O3 pipeline.
%a.addr = alloca ptr, align 8
%b.addr = alloca ptr, align 8
%c.addr = alloca ptr, align 8
%N = alloca i32, align 4
%i = alloca i32, align 4
%j = alloca i32, align 4
store ptr %a, ptr %a.addr, align 8
store ptr %b, ptr %b.addr, align 8
store ptr %c, ptr %c.addr, align 8
store i32 8, ptr %N, align 4
store i32 0, ptr %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc11, %entry
; Outer loop header: continue while i < 1152.
%0 = load i32, ptr %i, align 4
%cmp = icmp slt i32 %0, 1152
br i1 %cmp, label %for.body, label %for.end13
for.body: ; preds = %for.cond
; Reset the inner counter: j = 0.
store i32 0, ptr %j, align 4
br label %for.cond1
for.cond1: ; preds = %for.inc, %for.body
; Inner loop header: continue while j < 8.
%1 = load i32, ptr %j, align 4
%cmp2 = icmp slt i32 %1, 8
br i1 %cmp2, label %for.body3, label %for.end
for.body3: ; preds = %for.cond1
; a[i+j] += c[i+j] * b[i+j]; the i+j index is recomputed for each of the
; three arrays (c, b, a) from freshly reloaded i and j.
%2 = load ptr, ptr %c.addr, align 8
%3 = load i32, ptr %i, align 4
%4 = load i32, ptr %j, align 4
%add = add nsw i32 %3, %4
%idxprom = sext i32 %add to i64
%arrayidx = getelementptr inbounds float, ptr %2, i64 %idxprom
%5 = load float, ptr %arrayidx, align 4
%6 = load ptr, ptr %b.addr, align 8
%7 = load i32, ptr %i, align 4
%8 = load i32, ptr %j, align 4
%add4 = add nsw i32 %7, %8
%idxprom5 = sext i32 %add4 to i64
%arrayidx6 = getelementptr inbounds float, ptr %6, i64 %idxprom5
%9 = load float, ptr %arrayidx6, align 4
%mul = fmul fast float %5, %9
%10 = load ptr, ptr %a.addr, align 8
%11 = load i32, ptr %i, align 4
%12 = load i32, ptr %j, align 4
%add7 = add nsw i32 %11, %12
%idxprom8 = sext i32 %add7 to i64
%arrayidx9 = getelementptr inbounds float, ptr %10, i64 %idxprom8
%13 = load float, ptr %arrayidx9, align 4
%add10 = fadd fast float %13, %mul
store float %add10, ptr %arrayidx9, align 4
br label %for.inc
for.inc: ; preds = %for.body3
; j += 1
%14 = load i32, ptr %j, align 4
%inc = add nsw i32 %14, 1
store i32 %inc, ptr %j, align 4
br label %for.cond1
for.end: ; preds = %for.cond1
br label %for.inc11
for.inc11: ; preds = %for.end
; i += 8 (outer step matches the inner trip count of 8)
%15 = load i32, ptr %i, align 4
%add12 = add nsw i32 %15, 8
store i32 %add12, ptr %i, align 4
br label %for.cond
for.end13: ; preds = %for.cond
ret void
}
; Same shape as @same_op8 but with an invariant multiplier: the first
; operand of the fmul is always c[0] (loaded through a gep with constant
; index 0), i.e. a[i+j] += c[0] * b[i+j], so the vectorizer can splat it.
define void @same_op8_splat(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
; CHECK-LABEL: define void @same_op8_splat(
; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[C]], align 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[TMP0]], i64 0
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x float> [[BROADCAST_SPLATINSERT]], <2 x float> poison, <16 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 3
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x float>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC19:%.*]] = load <16 x float>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = fmul fast <16 x float> [[WIDE_VEC]], [[TMP1]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = fadd fast <16 x float> [[WIDE_VEC19]], [[TMP4]]
; CHECK-NEXT: store <16 x float> [[INTERLEAVED_VEC]], ptr [[TMP6]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], 144
; CHECK-NEXT: br i1 [[TMP25]], label %[[FOR_END11:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[FOR_END11]]:
; CHECK-NEXT: ret void
;
entry:
; Stack slots for the pointer arguments and the loop counters; promoted
; out of memory by the O3 pipeline.
%a.addr = alloca ptr, align 8
%b.addr = alloca ptr, align 8
%c.addr = alloca ptr, align 8
%N = alloca i32, align 4
%i = alloca i32, align 4
%j = alloca i32, align 4
store ptr %a, ptr %a.addr, align 8
store ptr %b, ptr %b.addr, align 8
store ptr %c, ptr %c.addr, align 8
store i32 8, ptr %N, align 4
store i32 0, ptr %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc9, %entry
; Outer loop header: continue while i < 1152.
%0 = load i32, ptr %i, align 4
%cmp = icmp slt i32 %0, 1152
br i1 %cmp, label %for.body, label %for.end11
for.body: ; preds = %for.cond
; Reset the inner counter: j = 0.
store i32 0, ptr %j, align 4
br label %for.cond1
for.cond1: ; preds = %for.inc, %for.body
; Inner loop header: continue while j < 8.
%1 = load i32, ptr %j, align 4
%cmp2 = icmp slt i32 %1, 8
br i1 %cmp2, label %for.body3, label %for.end
for.body3: ; preds = %for.cond1
; a[i+j] += c[0] * b[i+j]; note the constant-zero index into c — this is
; the loop-invariant operand that becomes the broadcast splat.
%2 = load ptr, ptr %c.addr, align 8
%arrayidx = getelementptr inbounds float, ptr %2, i64 0
%3 = load float, ptr %arrayidx, align 4
%4 = load ptr, ptr %b.addr, align 8
%5 = load i32, ptr %i, align 4
%6 = load i32, ptr %j, align 4
%add = add nsw i32 %5, %6
%idxprom = sext i32 %add to i64
%arrayidx4 = getelementptr inbounds float, ptr %4, i64 %idxprom
%7 = load float, ptr %arrayidx4, align 4
%mul = fmul fast float %3, %7
%8 = load ptr, ptr %a.addr, align 8
%9 = load i32, ptr %i, align 4
%10 = load i32, ptr %j, align 4
%add5 = add nsw i32 %9, %10
%idxprom6 = sext i32 %add5 to i64
%arrayidx7 = getelementptr inbounds float, ptr %8, i64 %idxprom6
%11 = load float, ptr %arrayidx7, align 4
%add8 = fadd fast float %11, %mul
store float %add8, ptr %arrayidx7, align 4
br label %for.inc
for.inc: ; preds = %for.body3
; j += 1
%12 = load i32, ptr %j, align 4
%inc = add nsw i32 %12, 1
store i32 %inc, ptr %j, align 4
br label %for.cond1
for.end: ; preds = %for.cond1
br label %for.inc9
for.inc9: ; preds = %for.end
; i += 8 (outer step matches the inner trip count of 8)
%13 = load i32, ptr %i, align 4
%add10 = add nsw i32 %13, 8
store i32 %add10, ptr %i, align 4
br label %for.cond
for.end11: ; preds = %for.cond
ret void
}
; This test contains an example of a SAXPY loop manually unrolled by five:
;
; void saxpy(long n, float a, float *x, float *y) {
; for (int i = 0; i < n; i += 5) {
; y[i] += a * x[i];
; y[i + 1] += a * x[i + 1];
; y[i + 2] += a * x[i + 2];
; y[i + 3] += a * x[i + 3];
; y[i + 4] += a * x[i + 4];
; }
; }
;
; SAXPY body manually unrolled by 5 (see the C sketch above): five copies of
; y[i+k] += a * x[i+k] for k = 0..4, then i += 5 and a signed n > i.next
; continuation test.
define void @saxpy_5(i64 %n, float %a, ptr readonly %x, ptr noalias %y) {
; CHECK-LABEL: define void @saxpy_5(
; CHECK-SAME: i64 [[N:%.*]], float [[A:%.*]], ptr readonly captures(none) [[X:%.*]], ptr noalias captures(none) [[Y:%.*]]) local_unnamed_addr #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = icmp sgt i64 [[N]], 0
; CHECK-NEXT: br i1 [[TMP0]], label %[[LOOP_PREHEADER:.*]], label %[[EXIT:.*]]
; CHECK: [[LOOP_PREHEADER]]:
; CHECK-NEXT: [[TMP1:%.*]] = add nsw i64 [[N]], -1
; CHECK-NEXT: [[TMP2:%.*]] = udiv i64 [[TMP1]], 5
; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 6
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[LOOP_PREHEADER11:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP3]], 9223372036854775806
; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[N_VEC]], 5
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[A]], i64 0
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[BROADCAST_SPLATINSERT]], <2 x float> poison, <10 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 5
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[X]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <10 x float>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[Y]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC5:%.*]] = load <10 x float>, ptr [[TMP7]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = fmul fast <10 x float> [[WIDE_VEC]], [[TMP5]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = fadd fast <10 x float> [[WIDE_VEC5]], [[TMP8]]
; CHECK-NEXT: store <10 x float> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT]], label %[[LOOP_PREHEADER11]]
; CHECK: [[LOOP_PREHEADER11]]:
; CHECK-NEXT: [[I1_PH:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x float> poison, float [[A]], i64 0
; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x float> [[TMP10]], <4 x float> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[I1:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ [[I1_PH]], %[[LOOP_PREHEADER11]] ]
; CHECK-NEXT: [[XGEP1:%.*]] = getelementptr inbounds nuw float, ptr [[X]], i64 [[I1]]
; CHECK-NEXT: [[YGEP1:%.*]] = getelementptr inbounds nuw float, ptr [[Y]], i64 [[I1]]
; CHECK-NEXT: [[TMP12:%.*]] = load <4 x float>, ptr [[XGEP1]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = fmul fast <4 x float> [[TMP12]], [[TMP11]]
; CHECK-NEXT: [[TMP14:%.*]] = load <4 x float>, ptr [[YGEP1]], align 4
; CHECK-NEXT: [[TMP15:%.*]] = fadd fast <4 x float> [[TMP14]], [[TMP13]]
; CHECK-NEXT: store <4 x float> [[TMP15]], ptr [[YGEP1]], align 4
; CHECK-NEXT: [[I5:%.*]] = add nuw nsw i64 [[I1]], 4
; CHECK-NEXT: [[XGEP5:%.*]] = getelementptr inbounds nuw float, ptr [[X]], i64 [[I5]]
; CHECK-NEXT: [[X5:%.*]] = load float, ptr [[XGEP5]], align 4
; CHECK-NEXT: [[AX5:%.*]] = fmul fast float [[X5]], [[A]]
; CHECK-NEXT: [[YGEP5:%.*]] = getelementptr inbounds nuw float, ptr [[Y]], i64 [[I5]]
; CHECK-NEXT: [[Y5:%.*]] = load float, ptr [[YGEP5]], align 4
; CHECK-NEXT: [[AXPY5:%.*]] = fadd fast float [[Y5]], [[AX5]]
; CHECK-NEXT: store float [[AXPY5]], ptr [[YGEP5]], align 4
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I1]], 5
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[N]], [[I_NEXT]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
; Skip the loop entirely when n <= 0.
%0 = icmp sgt i64 %n, 0
br i1 %0, label %loop, label %exit
loop:
%i1 = phi i64 [ %i.next, %loop ], [ 0, %entry ]
; lane 0: y[i] += a * x[i]
%xgep1 = getelementptr inbounds nuw float, ptr %x, i64 %i1
%x1 = load float, ptr %xgep1, align 4
%ax1 = fmul fast float %x1, %a
%ygep1 = getelementptr inbounds nuw float, ptr %y, i64 %i1
%y1 = load float, ptr %ygep1, align 4
%axpy1 = fadd fast float %y1, %ax1
store float %axpy1, ptr %ygep1, align 4
; lane 1: y[i + 1] += a * x[i + 1]
%i2 = add nuw nsw i64 %i1, 1
%xgep2 = getelementptr inbounds nuw float, ptr %x, i64 %i2
%x2 = load float, ptr %xgep2, align 4
%ax2 = fmul fast float %x2, %a
%ygep2 = getelementptr inbounds nuw float, ptr %y, i64 %i2
%y2 = load float, ptr %ygep2, align 4
%axpy2 = fadd fast float %y2, %ax2
store float %axpy2, ptr %ygep2, align 4
; lane 2: y[i + 2] += a * x[i + 2]
%i3 = add nuw nsw i64 %i1, 2
%xgep3 = getelementptr inbounds nuw float, ptr %x, i64 %i3
%x3 = load float, ptr %xgep3, align 4
%ax3 = fmul fast float %x3, %a
%ygep3 = getelementptr inbounds nuw float, ptr %y, i64 %i3
%y3 = load float, ptr %ygep3, align 4
%axpy3 = fadd fast float %y3, %ax3
store float %axpy3, ptr %ygep3, align 4
; lane 3: y[i + 3] += a * x[i + 3]
%i4 = add nuw nsw i64 %i1, 3
%xgep4 = getelementptr inbounds nuw float, ptr %x, i64 %i4
%x4 = load float, ptr %xgep4, align 4
%ax4 = fmul fast float %x4, %a
%ygep4 = getelementptr inbounds nuw float, ptr %y, i64 %i4
%y4 = load float, ptr %ygep4, align 4
%axpy4 = fadd fast float %y4, %ax4
store float %axpy4, ptr %ygep4, align 4
; lane 4: y[i + 4] += a * x[i + 4]
%i5 = add nuw nsw i64 %i1, 4
%xgep5 = getelementptr inbounds nuw float, ptr %x, i64 %i5
%x5 = load float, ptr %xgep5, align 4
%ax5 = fmul fast float %x5, %a
%ygep5 = getelementptr inbounds nuw float, ptr %y, i64 %i5
%y5 = load float, ptr %ygep5, align 4
%axpy5 = fadd fast float %y5, %ax5
store float %axpy5, ptr %ygep5, align 4
; i += 5; continue while n > i (signed).
%i.next = add nuw nsw i64 %i1, 5
%cmp = icmp sgt i64 %n, %i.next
br i1 %cmp, label %loop, label %exit
exit:
ret void
}
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]}
; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]}
; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META2]], [[META1]]}
;.