; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s

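; This test feeds a chain of reassociable 'fadd reassoc nsz' double additions,
; fed by loads through %0 and %1, to the SLP vectorizer. The checks below
; expect the four consecutive loads at offsets 144-168 to become a single
; <4 x double> load reduced via @llvm.vector.reduce.fadd, while the remaining
; operands (including the load from %0 that is added twice, hence the scalar
; fmul by 2.0) stay as scalar fadds folded into the reduction tail.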
define double @test(ptr %0, ptr %1) {
; CHECK-LABEL: define double @test(
; CHECK-SAME: ptr [[TMP0:%.*]], ptr [[TMP1:%.*]]) {
; CHECK-NEXT:  [[ENTRY:.*:]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i64 144
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i64 232
; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x double>, ptr [[TMP2]], align 8
; CHECK-NEXT:    [[TMP5:%.*]] = load double, ptr [[TMP0]], align 8
; CHECK-NEXT:    [[TMP6:%.*]] = load double, ptr [[TMP3]], align 8
; CHECK-NEXT:    [[TMP7:%.*]] = load double, ptr [[TMP1]], align 8
; CHECK-NEXT:    [[TMP8:%.*]] = load double, ptr [[TMP0]], align 8
; CHECK-NEXT:    [[TMP9:%.*]] = fmul reassoc nsz <4 x double> [[TMP4]], splat (double 1.000000e+00)
; CHECK-NEXT:    [[TMP10:%.*]] = call reassoc nsz double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[TMP9]])
; CHECK-NEXT:    [[TMP11:%.*]] = fmul double [[TMP5]], 2.000000e+00
; CHECK-NEXT:    [[OP_RDX5:%.*]] = fadd reassoc nsz double [[TMP11]], [[TMP8]]
; CHECK-NEXT:    [[OP_RDX6:%.*]] = fadd reassoc nsz double [[OP_RDX5]], [[TMP6]]
; CHECK-NEXT:    [[OP_RDX7:%.*]] = fadd reassoc nsz double [[OP_RDX6]], [[TMP7]]
; CHECK-NEXT:    [[OP_RDX8:%.*]] = fadd reassoc nsz double [[OP_RDX7]], [[TMP10]]
; CHECK-NEXT:    ret double [[OP_RDX8]]
;
entry:
  %2 = getelementptr i8, ptr %1, i64 144
  %3 = getelementptr i8, ptr %1, i64 152
  %4 = getelementptr i8, ptr %1, i64 160
  %5 = getelementptr i8, ptr %1, i64 168
  %6 = getelementptr i8, ptr %1, i64 232
  %7 = load double, ptr %2, align 8
  %8 = load double, ptr %3, align 8
  %9 = fadd reassoc nsz double %8, %7
  %10 = load double, ptr %4, align 8
  %11 = fadd reassoc nsz double %10, %9
  %12 = load double, ptr %5, align 8
  %13 = fadd reassoc nsz double %12, %11
  %14 = load double, ptr %0, align 8
  %15 = fadd reassoc nsz double %14, %13
  %16 = fadd reassoc nsz double %14, %15
  %17 = load double, ptr %6, align 8
  %18 = fadd reassoc nsz double %17, %16
  %19 = load double, ptr %1, align 8
  %20 = fadd reassoc nsz double %19, %18
  %21 = load double, ptr %0, align 8
  %22 = fadd reassoc nsz double %21, %20
  ret double %22
}