; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64 -passes=slp-vectorizer -S -mcpu=skylake-avx512 | FileCheck %s
; This test represents a case with multiple vectorization possibilities,
; but the most effective way to vectorize it is to match both 8-way
; reductions feeding the insertelement vector build sequence.
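;
; In rough C terms, the scalar body below computes the following
; (illustrative sketch only; the variable names are hypothetical and
; not part of the test):
;
;   double r1 = 0.0, r2 = 0.0;
;   for (int i = 0; i < 8; ++i) {
;     double x = arg[2*i + 1];    // ld1.*: odd-indexed elements of %arg
;     r1 += arg1[i] * x;          // mul1.*/rdx1.*: first 8-way reduction
;     r2 += arg1[16 + i] * x;     // mul2.*/rdx2.*: second 8-way reduction
;   }
;   arg2[0]  = r1;                // stored via llvm.masked.scatter
;   arg2[16] = r2;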
declare void @llvm.masked.scatter.v2f64.v2p0(<2 x double>, <2 x ptr>, i32 immarg, <2 x i1>)
define void @test(ptr nocapture readonly %arg, ptr nocapture readonly %arg1, ptr nocapture %arg2) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[GEP1_0:%.*]] = getelementptr inbounds double, ptr [[ARG:%.*]], i64 1
; CHECK-NEXT: [[LD1_0:%.*]] = load double, ptr [[GEP1_0]], align 8
; CHECK-NEXT: [[LD0_0:%.*]] = load double, ptr [[ARG1:%.*]], align 8
; CHECK-NEXT: [[MUL1_0:%.*]] = fmul fast double [[LD0_0]], [[LD1_0]]
; CHECK-NEXT: [[GEP2_0:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 16
; CHECK-NEXT: [[LD2_0:%.*]] = load double, ptr [[GEP2_0]], align 8
; CHECK-NEXT: [[MUL2_0:%.*]] = fmul fast double [[LD2_0]], [[LD1_0]]
; CHECK-NEXT: [[GEP1_1:%.*]] = getelementptr inbounds double, ptr [[ARG]], i64 3
; CHECK-NEXT: [[LD1_1:%.*]] = load double, ptr [[GEP1_1]], align 8
; CHECK-NEXT: [[GEP0_1:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 1
; CHECK-NEXT: [[LD0_1:%.*]] = load double, ptr [[GEP0_1]], align 8
; CHECK-NEXT: [[MUL1_1:%.*]] = fmul fast double [[LD0_1]], [[LD1_1]]
; CHECK-NEXT: [[RDX1_0:%.*]] = fadd fast double [[MUL1_0]], [[MUL1_1]]
; CHECK-NEXT: [[GEP2_1:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 17
; CHECK-NEXT: [[LD2_1:%.*]] = load double, ptr [[GEP2_1]], align 8
; CHECK-NEXT: [[MUL2_1:%.*]] = fmul fast double [[LD2_1]], [[LD1_1]]
; CHECK-NEXT: [[RDX2_0:%.*]] = fadd fast double [[MUL2_0]], [[MUL2_1]]
; CHECK-NEXT: [[GEP1_2:%.*]] = getelementptr inbounds double, ptr [[ARG]], i64 5
; CHECK-NEXT: [[LD1_2:%.*]] = load double, ptr [[GEP1_2]], align 8
; CHECK-NEXT: [[GEP0_2:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 2
; CHECK-NEXT: [[GEP2_2:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 18
; CHECK-NEXT: [[GEP1_3:%.*]] = getelementptr inbounds double, ptr [[ARG]], i64 7
; CHECK-NEXT: [[LD1_3:%.*]] = load double, ptr [[GEP1_3]], align 8
; CHECK-NEXT: [[GEP1_4:%.*]] = getelementptr inbounds double, ptr [[ARG]], i64 9
; CHECK-NEXT: [[LD1_4:%.*]] = load double, ptr [[GEP1_4]], align 8
; CHECK-NEXT: [[GEP1_5:%.*]] = getelementptr inbounds double, ptr [[ARG]], i64 11
; CHECK-NEXT: [[LD1_5:%.*]] = load double, ptr [[GEP1_5]], align 8
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x double>, ptr [[GEP0_2]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x double> poison, double [[LD1_2]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x double> [[TMP1]], double [[LD1_3]], i32 1
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x double> [[TMP2]], double [[LD1_4]], i32 2
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x double> [[TMP3]], double [[LD1_5]], i32 3
; CHECK-NEXT: [[TMP5:%.*]] = fmul fast <4 x double> [[TMP0]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x double>, ptr [[GEP2_2]], align 8
; CHECK-NEXT: [[TMP7:%.*]] = fmul fast <4 x double> [[TMP6]], [[TMP4]]
; CHECK-NEXT: [[GEP1_6:%.*]] = getelementptr inbounds double, ptr [[ARG]], i64 13
; CHECK-NEXT: [[LD1_6:%.*]] = load double, ptr [[GEP1_6]], align 8
; CHECK-NEXT: [[GEP0_6:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 6
; CHECK-NEXT: [[LD0_6:%.*]] = load double, ptr [[GEP0_6]], align 8
; CHECK-NEXT: [[MUL1_6:%.*]] = fmul fast double [[LD0_6]], [[LD1_6]]
; CHECK-NEXT: [[GEP2_6:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 22
; CHECK-NEXT: [[LD2_6:%.*]] = load double, ptr [[GEP2_6]], align 8
; CHECK-NEXT: [[MUL2_6:%.*]] = fmul fast double [[LD2_6]], [[LD1_6]]
; CHECK-NEXT: [[GEP1_7:%.*]] = getelementptr inbounds double, ptr [[ARG]], i64 15
; CHECK-NEXT: [[LD1_7:%.*]] = load double, ptr [[GEP1_7]], align 8
; CHECK-NEXT: [[GEP0_7:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 7
; CHECK-NEXT: [[LD0_7:%.*]] = load double, ptr [[GEP0_7]], align 8
; CHECK-NEXT: [[MUL1_7:%.*]] = fmul fast double [[LD0_7]], [[LD1_7]]
; CHECK-NEXT: [[TMP10:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[TMP5]])
; CHECK-NEXT: [[OP_RDX3:%.*]] = fadd fast double [[TMP10]], [[MUL1_6]]
; CHECK-NEXT: [[OP_RDX4:%.*]] = fadd fast double [[MUL1_7]], [[RDX1_0]]
; CHECK-NEXT: [[TMP8:%.*]] = fadd fast double [[OP_RDX3]], [[OP_RDX4]]
; CHECK-NEXT: [[GEP2_7:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 23
; CHECK-NEXT: [[LD2_7:%.*]] = load double, ptr [[GEP2_7]], align 8
; CHECK-NEXT: [[MUL2_7:%.*]] = fmul fast double [[LD2_7]], [[LD1_7]]
; CHECK-NEXT: [[TMP11:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[TMP7]])
; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast double [[TMP11]], [[MUL2_6]]
; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast double [[MUL2_7]], [[RDX2_0]]
; CHECK-NEXT: [[TMP9:%.*]] = fadd fast double [[OP_RDX]], [[OP_RDX1]]
; CHECK-NEXT: [[I142:%.*]] = insertelement <2 x double> poison, double [[TMP8]], i64 0
; CHECK-NEXT: [[I143:%.*]] = insertelement <2 x double> [[I142]], double [[TMP9]], i64 1
; CHECK-NEXT: [[P:%.*]] = getelementptr inbounds double, ptr [[ARG2:%.*]], <2 x i64> <i64 0, i64 16>
; CHECK-NEXT: call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> [[I143]], <2 x ptr> [[P]], i32 8, <2 x i1> splat (i1 true))
; CHECK-NEXT: ret void
;
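; The scalar source below forms two independent 8-way fadd reduction
; chains (%rdx1.* and %rdx2.*) over fmul results, then combines them
; into a <2 x double> that is scattered to %arg2.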
entry:
%gep1.0 = getelementptr inbounds double, ptr %arg, i64 1
%ld1.0 = load double, ptr %gep1.0, align 8
%ld0.0 = load double, ptr %arg1, align 8
%mul1.0 = fmul fast double %ld0.0, %ld1.0
%gep2.0 = getelementptr inbounds double, ptr %arg1, i64 16
%ld2.0 = load double, ptr %gep2.0, align 8
%mul2.0 = fmul fast double %ld2.0, %ld1.0
%gep1.1 = getelementptr inbounds double, ptr %arg, i64 3
%ld1.1 = load double, ptr %gep1.1, align 8
%gep0.1 = getelementptr inbounds double, ptr %arg1, i64 1
%ld0.1 = load double, ptr %gep0.1, align 8
%mul1.1 = fmul fast double %ld0.1, %ld1.1
%rdx1.0 = fadd fast double %mul1.0, %mul1.1
%gep2.1 = getelementptr inbounds double, ptr %arg1, i64 17
%ld2.1 = load double, ptr %gep2.1, align 8
%mul2.1 = fmul fast double %ld2.1, %ld1.1
%rdx2.0 = fadd fast double %mul2.0, %mul2.1
%gep1.2 = getelementptr inbounds double, ptr %arg, i64 5
%ld1.2 = load double, ptr %gep1.2, align 8
%gep0.2 = getelementptr inbounds double, ptr %arg1, i64 2
%ld0.2 = load double, ptr %gep0.2, align 8
%mul1.2 = fmul fast double %ld0.2, %ld1.2
%rdx1.1 = fadd fast double %rdx1.0, %mul1.2
%gep2.2 = getelementptr inbounds double, ptr %arg1, i64 18
%ld2.2 = load double, ptr %gep2.2, align 8
%mul2.2 = fmul fast double %ld2.2, %ld1.2
%rdx2.1 = fadd fast double %rdx2.0, %mul2.2
%gep1.3 = getelementptr inbounds double, ptr %arg, i64 7
%ld1.3 = load double, ptr %gep1.3, align 8
%gep0.3 = getelementptr inbounds double, ptr %arg1, i64 3
%ld0.3 = load double, ptr %gep0.3, align 8
%mul1.3 = fmul fast double %ld0.3, %ld1.3
%rdx1.2 = fadd fast double %rdx1.1, %mul1.3
%gep2.3 = getelementptr inbounds double, ptr %arg1, i64 19
%ld2.3 = load double, ptr %gep2.3, align 8
%mul2.3 = fmul fast double %ld2.3, %ld1.3
%rdx2.2 = fadd fast double %rdx2.1, %mul2.3
%gep1.4 = getelementptr inbounds double, ptr %arg, i64 9
%ld1.4 = load double, ptr %gep1.4, align 8
%gep0.4 = getelementptr inbounds double, ptr %arg1, i64 4
%ld0.4 = load double, ptr %gep0.4, align 8
%mul1.4 = fmul fast double %ld0.4, %ld1.4
%rdx1.3 = fadd fast double %rdx1.2, %mul1.4
%gep2.4 = getelementptr inbounds double, ptr %arg1, i64 20
%ld2.4 = load double, ptr %gep2.4, align 8
%mul2.4 = fmul fast double %ld2.4, %ld1.4
%rdx2.3 = fadd fast double %rdx2.2, %mul2.4
%gep1.5 = getelementptr inbounds double, ptr %arg, i64 11
%ld1.5 = load double, ptr %gep1.5, align 8
%gep0.5 = getelementptr inbounds double, ptr %arg1, i64 5
%ld0.5 = load double, ptr %gep0.5, align 8
%mul1.5 = fmul fast double %ld0.5, %ld1.5
%rdx1.4 = fadd fast double %rdx1.3, %mul1.5
%gep2.5 = getelementptr inbounds double, ptr %arg1, i64 21
%ld2.5 = load double, ptr %gep2.5, align 8
%mul2.5 = fmul fast double %ld2.5, %ld1.5
%rdx2.4 = fadd fast double %rdx2.3, %mul2.5
%gep1.6 = getelementptr inbounds double, ptr %arg, i64 13
%ld1.6 = load double, ptr %gep1.6, align 8
%gep0.6 = getelementptr inbounds double, ptr %arg1, i64 6
%ld0.6 = load double, ptr %gep0.6, align 8
%mul1.6 = fmul fast double %ld0.6, %ld1.6
%rdx1.5 = fadd fast double %rdx1.4, %mul1.6
%gep2.6 = getelementptr inbounds double, ptr %arg1, i64 22
%ld2.6 = load double, ptr %gep2.6, align 8
%mul2.6 = fmul fast double %ld2.6, %ld1.6
%rdx2.5 = fadd fast double %rdx2.4, %mul2.6
%gep1.7 = getelementptr inbounds double, ptr %arg, i64 15
%ld1.7 = load double, ptr %gep1.7, align 8
%gep0.7 = getelementptr inbounds double, ptr %arg1, i64 7
%ld0.7 = load double, ptr %gep0.7, align 8
%mul1.7 = fmul fast double %ld0.7, %ld1.7
%rdx1 = fadd fast double %rdx1.5, %mul1.7
%gep2.7 = getelementptr inbounds double, ptr %arg1, i64 23
%ld2.7 = load double, ptr %gep2.7, align 8
%mul2.7 = fmul fast double %ld2.7, %ld1.7
%rdx2 = fadd fast double %rdx2.5, %mul2.7
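  ; Build the <2 x double> from the two reduction results and scatter
  ; them to %arg2 at element offsets 0 and 16.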
%i142 = insertelement <2 x double> poison, double %rdx1, i64 0
%i143 = insertelement <2 x double> %i142, double %rdx2, i64 1
%p = getelementptr inbounds double, ptr %arg2, <2 x i64> <i64 0, i64 16>
call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> %i143, <2 x ptr> %p, i32 8, <2 x i1> <i1 true, i1 true>)
ret void
}