; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64 -passes=slp-vectorizer -S -mcpu=skylake-avx512 | FileCheck %s

declare void @llvm.masked.scatter.v2f64.v2p0(<2 x double>, <2 x ptr>, i32 immarg, <2 x i1>)
; Check that an 8-wide fmul+fadd reduction chain feeding a single
; insertelement lane is vectorized into an <8 x double> load, a vector fmul,
; and an @llvm.vector.reduce.fadd call, while the surrounding insert/scatter
; stays as a 2-wide masked scatter.
define void @rdx_feeds_single_insert(<2 x double> %v, ptr nocapture readonly %arg, ptr nocapture readonly %arg1, ptr nocapture %arg2) {
; CHECK-LABEL: @rdx_feeds_single_insert(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x double>, ptr [[ARG1:%.*]], align 8
; CHECK-NEXT:    [[TMP1:%.*]] = fmul fast <8 x double> [[TMP0]], <double 1.000000e+01, double 1.100000e+01, double 1.200000e+01, double 1.300000e+01, double 1.400000e+01, double 1.500000e+01, double 1.600000e+01, double 1.700000e+01>
; CHECK-NEXT:    [[TMP2:%.*]] = call fast double @llvm.vector.reduce.fadd.v8f64(double 0.000000e+00, <8 x double> [[TMP1]])
; CHECK-NEXT:    [[I:%.*]] = insertelement <2 x double> [[V:%.*]], double [[TMP2]], i64 1
; CHECK-NEXT:    [[P:%.*]] = getelementptr inbounds double, ptr [[ARG2:%.*]], <2 x i64> <i64 0, i64 16>
; CHECK-NEXT:    call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> [[I]], <2 x ptr> [[P]], i32 8, <2 x i1> splat (i1 true))
; CHECK-NEXT:    ret void
;
entry:
  ; Scalar reduction chain: sum(arg1[i] * (10.0 + i)) for i in 0..7.
  %ld0.0 = load double, ptr %arg1, align 8
  %mul1.0 = fmul fast double %ld0.0, 10.0
  %gep0.1 = getelementptr inbounds double, ptr %arg1, i64 1
  %ld0.1 = load double, ptr %gep0.1, align 8
  %mul1.1 = fmul fast double %ld0.1, 11.0
  %rdx1.0 = fadd fast double %mul1.0, %mul1.1
  %gep0.2 = getelementptr inbounds double, ptr %arg1, i64 2
  %ld0.2 = load double, ptr %gep0.2, align 8
  %mul1.2 = fmul fast double %ld0.2, 12.0
  %rdx1.1 = fadd fast double %rdx1.0, %mul1.2
  %gep0.3 = getelementptr inbounds double, ptr %arg1, i64 3
  %ld0.3 = load double, ptr %gep0.3, align 8
  %mul1.3 = fmul fast double %ld0.3, 13.0
  %rdx1.2 = fadd fast double %rdx1.1, %mul1.3
  %gep0.4 = getelementptr inbounds double, ptr %arg1, i64 4
  %ld0.4 = load double, ptr %gep0.4, align 8
  %mul1.4 = fmul fast double %ld0.4, 14.0
  %rdx1.3 = fadd fast double %rdx1.2, %mul1.4
  %gep0.5 = getelementptr inbounds double, ptr %arg1, i64 5
  %ld0.5 = load double, ptr %gep0.5, align 8
  %mul1.5 = fmul fast double %ld0.5, 15.0
  %rdx1.4 = fadd fast double %rdx1.3, %mul1.5
  %gep0.6 = getelementptr inbounds double, ptr %arg1, i64 6
  %ld0.6 = load double, ptr %gep0.6, align 8
  %mul1.6 = fmul fast double %ld0.6, 16.0
  %rdx1.5 = fadd fast double %rdx1.4, %mul1.6
  %gep0.7 = getelementptr inbounds double, ptr %arg1, i64 7
  %ld0.7 = load double, ptr %gep0.7, align 8
  %mul1.7 = fmul fast double %ld0.7, 17.0
  %rdx1 = fadd fast double %rdx1.5, %mul1.7
  ; Reduction result feeds exactly one lane of the 2-wide vector value.
  %i = insertelement <2 x double> %v, double %rdx1, i64 1
  %p = getelementptr inbounds double, ptr %arg2, <2 x i64> <i64 0, i64 16>
  call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> %i, <2 x ptr> %p, i32 8, <2 x i1> <i1 true, i1 true>)
  ret void
}
|  |  |