; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32F
; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32D
; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64F
; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64D
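;; Check how fadd with constant operands is lowered for <N x float> and
;; <N x double> vectors under each of the four FP configurations above.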

;; TODO: Merge the offset of the address calculation into the offset field of the instructions.

%f2 = type <2 x float>
%f4 = type <4 x float>
%f8 = type <8 x float>
%d2 = type <2 x double>
%d4 = type <4 x double>
%d8 = type <8 x double>

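;; fadd with zeroinitializer on <4 x float>: the scalar configurations expand
;; to four fld.s/fadd.s/fst.s triples sharing one zero register
;; (movgr2fr.w $fa4, $zero), while the LA64D checks use a single 128-bit LSX
;; vld/vfadd.s/vst sequence with the zero splat built by vrepli.b.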
define void @test_zero(ptr %P, ptr %S) nounwind {
; LA32F-LABEL: test_zero:
; LA32F:       # %bb.0:
; LA32F-NEXT:    fld.s $fa0, $a0, 12
; LA32F-NEXT:    fld.s $fa1, $a0, 0
; LA32F-NEXT:    fld.s $fa2, $a0, 4
; LA32F-NEXT:    fld.s $fa3, $a0, 8
; LA32F-NEXT:    movgr2fr.w $fa4, $zero
; LA32F-NEXT:    fadd.s $fa1, $fa1, $fa4
; LA32F-NEXT:    fadd.s $fa2, $fa2, $fa4
; LA32F-NEXT:    fadd.s $fa3, $fa3, $fa4
; LA32F-NEXT:    fadd.s $fa0, $fa0, $fa4
; LA32F-NEXT:    fst.s $fa0, $a1, 12
; LA32F-NEXT:    fst.s $fa3, $a1, 8
; LA32F-NEXT:    fst.s $fa2, $a1, 4
; LA32F-NEXT:    fst.s $fa1, $a1, 0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: test_zero:
; LA32D:       # %bb.0:
; LA32D-NEXT:    fld.s $fa0, $a0, 12
; LA32D-NEXT:    fld.s $fa1, $a0, 0
; LA32D-NEXT:    fld.s $fa2, $a0, 4
; LA32D-NEXT:    fld.s $fa3, $a0, 8
; LA32D-NEXT:    movgr2fr.w $fa4, $zero
; LA32D-NEXT:    fadd.s $fa1, $fa1, $fa4
; LA32D-NEXT:    fadd.s $fa2, $fa2, $fa4
; LA32D-NEXT:    fadd.s $fa3, $fa3, $fa4
; LA32D-NEXT:    fadd.s $fa0, $fa0, $fa4
; LA32D-NEXT:    fst.s $fa0, $a1, 12
; LA32D-NEXT:    fst.s $fa3, $a1, 8
; LA32D-NEXT:    fst.s $fa2, $a1, 4
; LA32D-NEXT:    fst.s $fa1, $a1, 0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: test_zero:
; LA64F:       # %bb.0:
; LA64F-NEXT:    fld.s $fa0, $a0, 12
; LA64F-NEXT:    fld.s $fa1, $a0, 0
; LA64F-NEXT:    fld.s $fa2, $a0, 4
; LA64F-NEXT:    fld.s $fa3, $a0, 8
; LA64F-NEXT:    movgr2fr.w $fa4, $zero
; LA64F-NEXT:    fadd.s $fa1, $fa1, $fa4
; LA64F-NEXT:    fadd.s $fa2, $fa2, $fa4
; LA64F-NEXT:    fadd.s $fa3, $fa3, $fa4
; LA64F-NEXT:    fadd.s $fa0, $fa0, $fa4
; LA64F-NEXT:    fst.s $fa0, $a1, 12
; LA64F-NEXT:    fst.s $fa3, $a1, 8
; LA64F-NEXT:    fst.s $fa2, $a1, 4
; LA64F-NEXT:    fst.s $fa1, $a1, 0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: test_zero:
; LA64D:       # %bb.0:
; LA64D-NEXT:    vld $vr0, $a0, 0
; LA64D-NEXT:    vrepli.b $vr1, 0
; LA64D-NEXT:    vfadd.s $vr0, $vr0, $vr1
; LA64D-NEXT:    vst $vr0, $a1, 0
; LA64D-NEXT:    ret
  %p = load %f4, ptr %P
  %R = fadd %f4 %p, zeroinitializer
  store %f4 %R, ptr %S
  ret void
}

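;; <2 x float> plus <1.0, 2.0>: the scalar configurations materialize 1.0 by
;; converting the integer 1 (movgr2fr.w + ffint.s.w) and load 2.0 from the
;; constant pool; in the LA64D checks the vector lives in the low 64 bits of
;; an LSX register and the constant pair is packed into one 64-bit immediate
;; (0x40000000_3F800000, i.e. 2.0f:1.0f) splatted by vreplgr2vr.d.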
define void @test_f2(ptr %P, ptr %S) nounwind {
; LA32F-LABEL: test_f2:
; LA32F:       # %bb.0:
; LA32F-NEXT:    fld.s $fa0, $a0, 4
; LA32F-NEXT:    fld.s $fa1, $a0, 0
; LA32F-NEXT:    addi.w $a0, $zero, 1
; LA32F-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI1_0)
; LA32F-NEXT:    fld.s $fa2, $a2, %pc_lo12(.LCPI1_0)
; LA32F-NEXT:    movgr2fr.w $fa3, $a0
; LA32F-NEXT:    ffint.s.w $fa3, $fa3
; LA32F-NEXT:    fadd.s $fa1, $fa1, $fa3
; LA32F-NEXT:    fadd.s $fa0, $fa0, $fa2
; LA32F-NEXT:    fst.s $fa0, $a1, 4
; LA32F-NEXT:    fst.s $fa1, $a1, 0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: test_f2:
; LA32D:       # %bb.0:
; LA32D-NEXT:    fld.s $fa0, $a0, 4
; LA32D-NEXT:    fld.s $fa1, $a0, 0
; LA32D-NEXT:    addi.w $a0, $zero, 1
; LA32D-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI1_0)
; LA32D-NEXT:    fld.s $fa2, $a2, %pc_lo12(.LCPI1_0)
; LA32D-NEXT:    movgr2fr.w $fa3, $a0
; LA32D-NEXT:    ffint.s.w $fa3, $fa3
; LA32D-NEXT:    fadd.s $fa1, $fa1, $fa3
; LA32D-NEXT:    fadd.s $fa0, $fa0, $fa2
; LA32D-NEXT:    fst.s $fa0, $a1, 4
; LA32D-NEXT:    fst.s $fa1, $a1, 0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: test_f2:
; LA64F:       # %bb.0:
; LA64F-NEXT:    fld.s $fa0, $a0, 4
; LA64F-NEXT:    fld.s $fa1, $a0, 0
; LA64F-NEXT:    addi.w $a0, $zero, 1
; LA64F-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI1_0)
; LA64F-NEXT:    fld.s $fa2, $a2, %pc_lo12(.LCPI1_0)
; LA64F-NEXT:    movgr2fr.w $fa3, $a0
; LA64F-NEXT:    ffint.s.w $fa3, $fa3
; LA64F-NEXT:    fadd.s $fa1, $fa1, $fa3
; LA64F-NEXT:    fadd.s $fa0, $fa0, $fa2
; LA64F-NEXT:    fst.s $fa0, $a1, 4
; LA64F-NEXT:    fst.s $fa1, $a1, 0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: test_f2:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ld.d $a0, $a0, 0
; LA64D-NEXT:    vinsgr2vr.d $vr0, $a0, 0
; LA64D-NEXT:    lu12i.w $a0, 260096
; LA64D-NEXT:    lu52i.d $a0, $a0, 1024
; LA64D-NEXT:    vreplgr2vr.d $vr1, $a0
; LA64D-NEXT:    vfadd.s $vr0, $vr0, $vr1
; LA64D-NEXT:    vpickve2gr.d $a0, $vr0, 0
; LA64D-NEXT:    st.d $a0, $a1, 0
; LA64D-NEXT:    ret
  %p = load %f2, ptr %P
  %R = fadd %f2 %p, < float 1.000000e+00, float 2.000000e+00 >
  store %f2 %R, ptr %S
  ret void
}

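;; <4 x float> plus <1.0, 2.0, 3.0, 4.0>: the scalar configurations still
;; synthesize 1.0 from the integer 1 but load 2.0/3.0/4.0 from three constant
;; pool entries, whereas LA64D fetches the whole constant vector with a
;; single vld from the pool.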
define void @test_f4(ptr %P, ptr %S) nounwind {
; LA32F-LABEL: test_f4:
; LA32F:       # %bb.0:
; LA32F-NEXT:    fld.s $fa0, $a0, 12
; LA32F-NEXT:    fld.s $fa1, $a0, 8
; LA32F-NEXT:    fld.s $fa2, $a0, 4
; LA32F-NEXT:    fld.s $fa3, $a0, 0
; LA32F-NEXT:    addi.w $a0, $zero, 1
; LA32F-NEXT:    movgr2fr.w $fa4, $a0
; LA32F-NEXT:    ffint.s.w $fa4, $fa4
; LA32F-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI2_0)
; LA32F-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI2_0)
; LA32F-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI2_1)
; LA32F-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI2_1)
; LA32F-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI2_2)
; LA32F-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI2_2)
; LA32F-NEXT:    fadd.s $fa3, $fa3, $fa4
; LA32F-NEXT:    fadd.s $fa2, $fa2, $fa5
; LA32F-NEXT:    fadd.s $fa1, $fa1, $fa6
; LA32F-NEXT:    fadd.s $fa0, $fa0, $fa7
; LA32F-NEXT:    fst.s $fa0, $a1, 12
; LA32F-NEXT:    fst.s $fa1, $a1, 8
; LA32F-NEXT:    fst.s $fa2, $a1, 4
; LA32F-NEXT:    fst.s $fa3, $a1, 0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: test_f4:
; LA32D:       # %bb.0:
; LA32D-NEXT:    fld.s $fa0, $a0, 12
; LA32D-NEXT:    fld.s $fa1, $a0, 8
; LA32D-NEXT:    fld.s $fa2, $a0, 4
; LA32D-NEXT:    fld.s $fa3, $a0, 0
; LA32D-NEXT:    addi.w $a0, $zero, 1
; LA32D-NEXT:    movgr2fr.w $fa4, $a0
; LA32D-NEXT:    ffint.s.w $fa4, $fa4
; LA32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI2_0)
; LA32D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI2_0)
; LA32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI2_1)
; LA32D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI2_1)
; LA32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI2_2)
; LA32D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI2_2)
; LA32D-NEXT:    fadd.s $fa3, $fa3, $fa4
; LA32D-NEXT:    fadd.s $fa2, $fa2, $fa5
; LA32D-NEXT:    fadd.s $fa1, $fa1, $fa6
; LA32D-NEXT:    fadd.s $fa0, $fa0, $fa7
; LA32D-NEXT:    fst.s $fa0, $a1, 12
; LA32D-NEXT:    fst.s $fa1, $a1, 8
; LA32D-NEXT:    fst.s $fa2, $a1, 4
; LA32D-NEXT:    fst.s $fa3, $a1, 0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: test_f4:
; LA64F:       # %bb.0:
; LA64F-NEXT:    fld.s $fa0, $a0, 12
; LA64F-NEXT:    fld.s $fa1, $a0, 8
; LA64F-NEXT:    fld.s $fa2, $a0, 4
; LA64F-NEXT:    fld.s $fa3, $a0, 0
; LA64F-NEXT:    addi.w $a0, $zero, 1
; LA64F-NEXT:    movgr2fr.w $fa4, $a0
; LA64F-NEXT:    ffint.s.w $fa4, $fa4
; LA64F-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI2_0)
; LA64F-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI2_0)
; LA64F-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI2_1)
; LA64F-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI2_1)
; LA64F-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI2_2)
; LA64F-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI2_2)
; LA64F-NEXT:    fadd.s $fa3, $fa3, $fa4
; LA64F-NEXT:    fadd.s $fa2, $fa2, $fa5
; LA64F-NEXT:    fadd.s $fa1, $fa1, $fa6
; LA64F-NEXT:    fadd.s $fa0, $fa0, $fa7
; LA64F-NEXT:    fst.s $fa0, $a1, 12
; LA64F-NEXT:    fst.s $fa1, $a1, 8
; LA64F-NEXT:    fst.s $fa2, $a1, 4
; LA64F-NEXT:    fst.s $fa3, $a1, 0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: test_f4:
; LA64D:       # %bb.0:
; LA64D-NEXT:    vld $vr0, $a0, 0
; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI2_0)
; LA64D-NEXT:    vld $vr1, $a0, %pc_lo12(.LCPI2_0)
; LA64D-NEXT:    vfadd.s $vr0, $vr0, $vr1
; LA64D-NEXT:    vst $vr0, $a1, 0
; LA64D-NEXT:    ret
  %p = load %f4, ptr %P
  %R = fadd %f4 %p, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 >
  store %f4 %R, ptr %S
  ret void
}

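;; <8 x float>: the constant repeats <1.0, 2.0, 3.0, 4.0> twice, so the
;; scalar configurations reuse the four materialized constants across both
;; halves and LA64D adds the same constant-pool vector to each 128-bit half.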
define void @test_f8(ptr %P, ptr %S) nounwind {
; LA32F-LABEL: test_f8:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $a2, $zero, 1
; LA32F-NEXT:    movgr2fr.w $fa0, $a2
; LA32F-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI3_0)
; LA32F-NEXT:    fld.s $fa1, $a2, %pc_lo12(.LCPI3_0)
; LA32F-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI3_1)
; LA32F-NEXT:    fld.s $fa2, $a2, %pc_lo12(.LCPI3_1)
; LA32F-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI3_2)
; LA32F-NEXT:    fld.s $fa3, $a2, %pc_lo12(.LCPI3_2)
; LA32F-NEXT:    fld.s $fa4, $a0, 28
; LA32F-NEXT:    fld.s $fa5, $a0, 24
; LA32F-NEXT:    fld.s $fa6, $a0, 12
; LA32F-NEXT:    fld.s $fa7, $a0, 8
; LA32F-NEXT:    fld.s $ft0, $a0, 0
; LA32F-NEXT:    fld.s $ft1, $a0, 16
; LA32F-NEXT:    fld.s $ft2, $a0, 4
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    fadd.s $ft0, $ft0, $fa0
; LA32F-NEXT:    fadd.s $fa0, $ft1, $fa0
; LA32F-NEXT:    fld.s $ft1, $a0, 20
; LA32F-NEXT:    fadd.s $ft2, $ft2, $fa1
; LA32F-NEXT:    fadd.s $fa7, $fa7, $fa2
; LA32F-NEXT:    fadd.s $fa6, $fa6, $fa3
; LA32F-NEXT:    fadd.s $fa1, $ft1, $fa1
; LA32F-NEXT:    fadd.s $fa2, $fa5, $fa2
; LA32F-NEXT:    fadd.s $fa3, $fa4, $fa3
; LA32F-NEXT:    fst.s $fa3, $a1, 28
; LA32F-NEXT:    fst.s $fa2, $a1, 24
; LA32F-NEXT:    fst.s $fa1, $a1, 20
; LA32F-NEXT:    fst.s $fa6, $a1, 12
; LA32F-NEXT:    fst.s $fa7, $a1, 8
; LA32F-NEXT:    fst.s $ft2, $a1, 4
; LA32F-NEXT:    fst.s $fa0, $a1, 16
; LA32F-NEXT:    fst.s $ft0, $a1, 0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: test_f8:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $a2, $zero, 1
; LA32D-NEXT:    movgr2fr.w $fa0, $a2
; LA32D-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI3_0)
; LA32D-NEXT:    fld.s $fa1, $a2, %pc_lo12(.LCPI3_0)
; LA32D-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI3_1)
; LA32D-NEXT:    fld.s $fa2, $a2, %pc_lo12(.LCPI3_1)
; LA32D-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI3_2)
; LA32D-NEXT:    fld.s $fa3, $a2, %pc_lo12(.LCPI3_2)
; LA32D-NEXT:    fld.s $fa4, $a0, 28
; LA32D-NEXT:    fld.s $fa5, $a0, 24
; LA32D-NEXT:    fld.s $fa6, $a0, 12
; LA32D-NEXT:    fld.s $fa7, $a0, 8
; LA32D-NEXT:    fld.s $ft0, $a0, 0
; LA32D-NEXT:    fld.s $ft1, $a0, 16
; LA32D-NEXT:    fld.s $ft2, $a0, 4
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    fadd.s $ft0, $ft0, $fa0
; LA32D-NEXT:    fadd.s $fa0, $ft1, $fa0
; LA32D-NEXT:    fld.s $ft1, $a0, 20
; LA32D-NEXT:    fadd.s $ft2, $ft2, $fa1
; LA32D-NEXT:    fadd.s $fa7, $fa7, $fa2
; LA32D-NEXT:    fadd.s $fa6, $fa6, $fa3
; LA32D-NEXT:    fadd.s $fa1, $ft1, $fa1
; LA32D-NEXT:    fadd.s $fa2, $fa5, $fa2
; LA32D-NEXT:    fadd.s $fa3, $fa4, $fa3
; LA32D-NEXT:    fst.s $fa3, $a1, 28
; LA32D-NEXT:    fst.s $fa2, $a1, 24
; LA32D-NEXT:    fst.s $fa1, $a1, 20
; LA32D-NEXT:    fst.s $fa6, $a1, 12
; LA32D-NEXT:    fst.s $fa7, $a1, 8
; LA32D-NEXT:    fst.s $ft2, $a1, 4
; LA32D-NEXT:    fst.s $fa0, $a1, 16
; LA32D-NEXT:    fst.s $ft0, $a1, 0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: test_f8:
; LA64F:       # %bb.0:
; LA64F-NEXT:    addi.w $a2, $zero, 1
; LA64F-NEXT:    movgr2fr.w $fa0, $a2
; LA64F-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI3_0)
; LA64F-NEXT:    fld.s $fa1, $a2, %pc_lo12(.LCPI3_0)
; LA64F-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI3_1)
; LA64F-NEXT:    fld.s $fa2, $a2, %pc_lo12(.LCPI3_1)
; LA64F-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI3_2)
; LA64F-NEXT:    fld.s $fa3, $a2, %pc_lo12(.LCPI3_2)
; LA64F-NEXT:    fld.s $fa4, $a0, 28
; LA64F-NEXT:    fld.s $fa5, $a0, 24
; LA64F-NEXT:    fld.s $fa6, $a0, 12
; LA64F-NEXT:    fld.s $fa7, $a0, 8
; LA64F-NEXT:    fld.s $ft0, $a0, 0
; LA64F-NEXT:    fld.s $ft1, $a0, 16
; LA64F-NEXT:    fld.s $ft2, $a0, 4
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    fadd.s $ft0, $ft0, $fa0
; LA64F-NEXT:    fadd.s $fa0, $ft1, $fa0
; LA64F-NEXT:    fld.s $ft1, $a0, 20
; LA64F-NEXT:    fadd.s $ft2, $ft2, $fa1
; LA64F-NEXT:    fadd.s $fa7, $fa7, $fa2
; LA64F-NEXT:    fadd.s $fa6, $fa6, $fa3
; LA64F-NEXT:    fadd.s $fa1, $ft1, $fa1
; LA64F-NEXT:    fadd.s $fa2, $fa5, $fa2
; LA64F-NEXT:    fadd.s $fa3, $fa4, $fa3
; LA64F-NEXT:    fst.s $fa3, $a1, 28
; LA64F-NEXT:    fst.s $fa2, $a1, 24
; LA64F-NEXT:    fst.s $fa1, $a1, 20
; LA64F-NEXT:    fst.s $fa6, $a1, 12
; LA64F-NEXT:    fst.s $fa7, $a1, 8
; LA64F-NEXT:    fst.s $ft2, $a1, 4
; LA64F-NEXT:    fst.s $fa0, $a1, 16
; LA64F-NEXT:    fst.s $ft0, $a1, 0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: test_f8:
; LA64D:       # %bb.0:
; LA64D-NEXT:    vld $vr0, $a0, 16
; LA64D-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI3_0)
; LA64D-NEXT:    vld $vr1, $a2, %pc_lo12(.LCPI3_0)
; LA64D-NEXT:    vld $vr2, $a0, 0
; LA64D-NEXT:    vfadd.s $vr0, $vr0, $vr1
; LA64D-NEXT:    vfadd.s $vr1, $vr2, $vr1
; LA64D-NEXT:    vst $vr1, $a1, 0
; LA64D-NEXT:    vst $vr0, $a1, 16
; LA64D-NEXT:    ret
  %p = load %f8, ptr %P
  %R = fadd %f8 %p, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 >
  store %f8 %R, ptr %S
  ret void
}

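;; <2 x double>: without FP64, LA32F and LA64F fall back to __adddf3 libcalls
;; and build 1.0/2.0 in integer registers (e.g. lu12i.w $a3, 261888 gives the
;; high word 0x3FF00000 of 1.0); LA32D uses scalar fld.d/fadd.d/fst.d, and
;; LA64D folds the whole operation into one vfadd.d.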
define void @test_d2(ptr %P, ptr %S) nounwind {
; LA32F-LABEL: test_d2:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $sp, $sp, -32
; LA32F-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s3, $sp, 8 # 4-byte Folded Spill
; LA32F-NEXT:    ld.w $fp, $a0, 8
; LA32F-NEXT:    ld.w $s0, $a0, 12
; LA32F-NEXT:    ld.w $a2, $a0, 0
; LA32F-NEXT:    ld.w $a4, $a0, 4
; LA32F-NEXT:    move $s1, $a1
; LA32F-NEXT:    lu12i.w $a3, 261888
; LA32F-NEXT:    move $a0, $a2
; LA32F-NEXT:    move $a1, $a4
; LA32F-NEXT:    move $a2, $zero
; LA32F-NEXT:    bl %plt(__adddf3)
; LA32F-NEXT:    move $s2, $a0
; LA32F-NEXT:    move $s3, $a1
; LA32F-NEXT:    lu12i.w $a3, 262144
; LA32F-NEXT:    move $a0, $fp
; LA32F-NEXT:    move $a1, $s0
; LA32F-NEXT:    move $a2, $zero
; LA32F-NEXT:    bl %plt(__adddf3)
; LA32F-NEXT:    st.w $a0, $s1, 8
; LA32F-NEXT:    st.w $a1, $s1, 12
; LA32F-NEXT:    st.w $s2, $s1, 0
; LA32F-NEXT:    st.w $s3, $s1, 4
; LA32F-NEXT:    ld.w $s3, $sp, 8 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
; LA32F-NEXT:    addi.w $sp, $sp, 32
; LA32F-NEXT:    ret
;
; LA32D-LABEL: test_d2:
; LA32D:       # %bb.0:
; LA32D-NEXT:    fld.d $fa0, $a0, 8
; LA32D-NEXT:    fld.d $fa1, $a0, 0
; LA32D-NEXT:    addi.w $a0, $zero, 1
; LA32D-NEXT:    movgr2fr.w $fa2, $a0
; LA32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI4_0)
; LA32D-NEXT:    fld.d $fa3, $a0, %pc_lo12(.LCPI4_0)
; LA32D-NEXT:    ffint.s.w $fa2, $fa2
; LA32D-NEXT:    fcvt.d.s $fa2, $fa2
; LA32D-NEXT:    fadd.d $fa1, $fa1, $fa2
; LA32D-NEXT:    fadd.d $fa0, $fa0, $fa3
; LA32D-NEXT:    fst.d $fa0, $a1, 8
; LA32D-NEXT:    fst.d $fa1, $a1, 0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: test_d2:
; LA64F:       # %bb.0:
; LA64F-NEXT:    addi.d $sp, $sp, -32
; LA64F-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $s1, $sp, 0 # 8-byte Folded Spill
; LA64F-NEXT:    ld.d $fp, $a0, 8
; LA64F-NEXT:    ld.d $a0, $a0, 0
; LA64F-NEXT:    move $s0, $a1
; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
; LA64F-NEXT:    bl %plt(__adddf3)
; LA64F-NEXT:    move $s1, $a0
; LA64F-NEXT:    lu52i.d $a1, $zero, 1024
; LA64F-NEXT:    move $a0, $fp
; LA64F-NEXT:    bl %plt(__adddf3)
; LA64F-NEXT:    st.d $a0, $s0, 8
; LA64F-NEXT:    st.d $s1, $s0, 0
; LA64F-NEXT:    ld.d $s1, $sp, 0 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT:    addi.d $sp, $sp, 32
; LA64F-NEXT:    ret
;
; LA64D-LABEL: test_d2:
; LA64D:       # %bb.0:
; LA64D-NEXT:    vld $vr0, $a0, 0
; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI4_0)
; LA64D-NEXT:    vld $vr1, $a0, %pc_lo12(.LCPI4_0)
; LA64D-NEXT:    vfadd.d $vr0, $vr0, $vr1
; LA64D-NEXT:    vst $vr0, $a1, 0
; LA64D-NEXT:    ret
  %p = load %d2, ptr %P
  %R = fadd %d2 %p, < double 1.000000e+00, double 2.000000e+00 >
  store %d2 %R, ptr %S
  ret void
}

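;; <4 x double>: LA32F/LA64F need four __adddf3 calls; on LA64F the
;; exponent-only constants 1.0/2.0/4.0 come from a single lu52i.d while 3.0
;; also needs lu32i.d for its top mantissa bit. LA32D does four scalar
;; fadd.d and LA64D two 128-bit vfadd.d.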
define void @test_d4(ptr %P, ptr %S) nounwind {
; LA32F-LABEL: test_d4:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $sp, $sp, -48
; LA32F-NEXT:    st.w $ra, $sp, 44 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $fp, $sp, 40 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s0, $sp, 36 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s1, $sp, 32 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s2, $sp, 28 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s3, $sp, 24 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s4, $sp, 20 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s5, $sp, 16 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s6, $sp, 12 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s7, $sp, 8 # 4-byte Folded Spill
; LA32F-NEXT:    ld.w $fp, $a0, 24
; LA32F-NEXT:    ld.w $s0, $a0, 28
; LA32F-NEXT:    ld.w $s1, $a0, 16
; LA32F-NEXT:    ld.w $s2, $a0, 20
; LA32F-NEXT:    ld.w $s3, $a0, 8
; LA32F-NEXT:    ld.w $s4, $a0, 12
; LA32F-NEXT:    ld.w $a2, $a0, 0
; LA32F-NEXT:    ld.w $a4, $a0, 4
; LA32F-NEXT:    move $s5, $a1
; LA32F-NEXT:    lu12i.w $a3, 261888
; LA32F-NEXT:    move $a0, $a2
; LA32F-NEXT:    move $a1, $a4
; LA32F-NEXT:    move $a2, $zero
; LA32F-NEXT:    bl %plt(__adddf3)
; LA32F-NEXT:    move $s6, $a0
; LA32F-NEXT:    move $s7, $a1
; LA32F-NEXT:    lu12i.w $a3, 262144
; LA32F-NEXT:    move $a0, $s3
; LA32F-NEXT:    move $a1, $s4
; LA32F-NEXT:    move $a2, $zero
; LA32F-NEXT:    bl %plt(__adddf3)
; LA32F-NEXT:    move $s3, $a0
; LA32F-NEXT:    move $s4, $a1
; LA32F-NEXT:    lu12i.w $a3, 262272
; LA32F-NEXT:    move $a0, $s1
; LA32F-NEXT:    move $a1, $s2
; LA32F-NEXT:    move $a2, $zero
; LA32F-NEXT:    bl %plt(__adddf3)
; LA32F-NEXT:    move $s1, $a0
; LA32F-NEXT:    move $s2, $a1
; LA32F-NEXT:    lu12i.w $a3, 262400
; LA32F-NEXT:    move $a0, $fp
; LA32F-NEXT:    move $a1, $s0
; LA32F-NEXT:    move $a2, $zero
; LA32F-NEXT:    bl %plt(__adddf3)
; LA32F-NEXT:    st.w $a0, $s5, 24
; LA32F-NEXT:    st.w $a1, $s5, 28
; LA32F-NEXT:    st.w $s1, $s5, 16
; LA32F-NEXT:    st.w $s2, $s5, 20
; LA32F-NEXT:    st.w $s3, $s5, 8
; LA32F-NEXT:    st.w $s4, $s5, 12
; LA32F-NEXT:    st.w $s6, $s5, 0
; LA32F-NEXT:    st.w $s7, $s5, 4
; LA32F-NEXT:    ld.w $s7, $sp, 8 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s6, $sp, 12 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s5, $sp, 16 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s4, $sp, 20 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s3, $sp, 24 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s2, $sp, 28 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s1, $sp, 32 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s0, $sp, 36 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $fp, $sp, 40 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $ra, $sp, 44 # 4-byte Folded Reload
; LA32F-NEXT:    addi.w $sp, $sp, 48
; LA32F-NEXT:    ret
;
; LA32D-LABEL: test_d4:
; LA32D:       # %bb.0:
; LA32D-NEXT:    fld.d $fa0, $a0, 24
; LA32D-NEXT:    fld.d $fa1, $a0, 16
; LA32D-NEXT:    fld.d $fa2, $a0, 8
; LA32D-NEXT:    fld.d $fa3, $a0, 0
; LA32D-NEXT:    addi.w $a0, $zero, 1
; LA32D-NEXT:    movgr2fr.w $fa4, $a0
; LA32D-NEXT:    ffint.s.w $fa4, $fa4
; LA32D-NEXT:    fcvt.d.s $fa4, $fa4
; LA32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_0)
; LA32D-NEXT:    fld.d $fa5, $a0, %pc_lo12(.LCPI5_0)
; LA32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_1)
; LA32D-NEXT:    fld.d $fa6, $a0, %pc_lo12(.LCPI5_1)
; LA32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_2)
; LA32D-NEXT:    fld.d $fa7, $a0, %pc_lo12(.LCPI5_2)
; LA32D-NEXT:    fadd.d $fa3, $fa3, $fa4
; LA32D-NEXT:    fadd.d $fa2, $fa2, $fa5
; LA32D-NEXT:    fadd.d $fa1, $fa1, $fa6
; LA32D-NEXT:    fadd.d $fa0, $fa0, $fa7
; LA32D-NEXT:    fst.d $fa0, $a1, 24
; LA32D-NEXT:    fst.d $fa1, $a1, 16
; LA32D-NEXT:    fst.d $fa2, $a1, 8
; LA32D-NEXT:    fst.d $fa3, $a1, 0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: test_d4:
; LA64F:       # %bb.0:
; LA64F-NEXT:    addi.d $sp, $sp, -48
; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $s2, $sp, 8 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $s3, $sp, 0 # 8-byte Folded Spill
; LA64F-NEXT:    ld.d $fp, $a0, 24
; LA64F-NEXT:    ld.d $s0, $a0, 8
; LA64F-NEXT:    ld.d $s1, $a0, 0
; LA64F-NEXT:    ld.d $a0, $a0, 16
; LA64F-NEXT:    move $s2, $a1
; LA64F-NEXT:    ori $a1, $zero, 0
; LA64F-NEXT:    lu32i.d $a1, -524288
; LA64F-NEXT:    lu52i.d $a1, $a1, 1024
; LA64F-NEXT:    bl %plt(__adddf3)
; LA64F-NEXT:    move $s3, $a0
; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
; LA64F-NEXT:    move $a0, $s1
; LA64F-NEXT:    bl %plt(__adddf3)
; LA64F-NEXT:    move $s1, $a0
; LA64F-NEXT:    lu52i.d $a1, $zero, 1024
; LA64F-NEXT:    move $a0, $s0
; LA64F-NEXT:    bl %plt(__adddf3)
; LA64F-NEXT:    move $s0, $a0
; LA64F-NEXT:    lu52i.d $a1, $zero, 1025
; LA64F-NEXT:    move $a0, $fp
; LA64F-NEXT:    bl %plt(__adddf3)
; LA64F-NEXT:    st.d $a0, $s2, 24
; LA64F-NEXT:    st.d $s0, $s2, 8
; LA64F-NEXT:    st.d $s1, $s2, 0
; LA64F-NEXT:    st.d $s3, $s2, 16
; LA64F-NEXT:    ld.d $s3, $sp, 0 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $s2, $sp, 8 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT:    addi.d $sp, $sp, 48
; LA64F-NEXT:    ret
;
; LA64D-LABEL: test_d4:
; LA64D:       # %bb.0:
; LA64D-NEXT:    vld $vr0, $a0, 0
; LA64D-NEXT:    vld $vr1, $a0, 16
; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_0)
; LA64D-NEXT:    vld $vr2, $a0, %pc_lo12(.LCPI5_0)
; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_1)
; LA64D-NEXT:    vld $vr3, $a0, %pc_lo12(.LCPI5_1)
; LA64D-NEXT:    vfadd.d $vr1, $vr1, $vr2
; LA64D-NEXT:    vfadd.d $vr0, $vr0, $vr3
; LA64D-NEXT:    vst $vr0, $a1, 0
; LA64D-NEXT:    vst $vr1, $a1, 16
; LA64D-NEXT:    ret
  %p = load %d4, ptr %P
  %R = fadd %d4 %p, < double 1.000000e+00, double 2.000000e+00, double 3.000000e+00, double 4.000000e+00 >
  store %d4 %R, ptr %S
  ret void
}

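;; <8 x double>: the same pattern at twice the width; LA64D loads two
;; constant-pool vectors and reuses each across two of the four vfadd.d,
;; since the four-element constant pattern repeats.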
define void @test_d8(ptr %P, ptr %S) nounwind {
; LA32F-LABEL: test_d8:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $sp, $sp, -96
; LA32F-NEXT:    st.w $ra, $sp, 92 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $fp, $sp, 88 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s0, $sp, 84 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s1, $sp, 80 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s2, $sp, 76 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s3, $sp, 72 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s4, $sp, 68 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s5, $sp, 64 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s6, $sp, 60 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s7, $sp, 56 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $s8, $sp, 52 # 4-byte Folded Spill
; LA32F-NEXT:    ld.w $a2, $a0, 56
; LA32F-NEXT:    st.w $a2, $sp, 48 # 4-byte Folded Spill
; LA32F-NEXT:    ld.w $a2, $a0, 60
; LA32F-NEXT:    st.w $a2, $sp, 44 # 4-byte Folded Spill
; LA32F-NEXT:    ld.w $a2, $a0, 48
; LA32F-NEXT:    st.w $a2, $sp, 32 # 4-byte Folded Spill
; LA32F-NEXT:    ld.w $a2, $a0, 52
; LA32F-NEXT:    st.w $a2, $sp, 28 # 4-byte Folded Spill
; LA32F-NEXT:    ld.w $s8, $a0, 40
; LA32F-NEXT:    ld.w $a2, $a0, 44
; LA32F-NEXT:    st.w $a2, $sp, 16 # 4-byte Folded Spill
; LA32F-NEXT:    ld.w $s3, $a0, 32
; LA32F-NEXT:    ld.w $s4, $a0, 36
; LA32F-NEXT:    ld.w $s5, $a0, 24
; LA32F-NEXT:    ld.w $s6, $a0, 28
; LA32F-NEXT:    ld.w $s1, $a0, 16
; LA32F-NEXT:    ld.w $s2, $a0, 20
; LA32F-NEXT:    ld.w $s7, $a0, 8
; LA32F-NEXT:    ld.w $s0, $a0, 12
; LA32F-NEXT:    ld.w $a2, $a0, 0
; LA32F-NEXT:    ld.w $a4, $a0, 4
; LA32F-NEXT:    move $fp, $a1
; LA32F-NEXT:    lu12i.w $a3, 261888
; LA32F-NEXT:    move $a0, $a2
; LA32F-NEXT:    move $a1, $a4
; LA32F-NEXT:    move $a2, $zero
; LA32F-NEXT:    bl %plt(__adddf3)
; LA32F-NEXT:    st.w $a0, $sp, 40 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $a1, $sp, 36 # 4-byte Folded Spill
; LA32F-NEXT:    lu12i.w $a3, 262144
; LA32F-NEXT:    move $a0, $s7
; LA32F-NEXT:    move $a1, $s0
; LA32F-NEXT:    move $a2, $zero
; LA32F-NEXT:    move $s0, $a3
; LA32F-NEXT:    bl %plt(__adddf3)
; LA32F-NEXT:    st.w $a0, $sp, 24 # 4-byte Folded Spill
; LA32F-NEXT:    st.w $a1, $sp, 20 # 4-byte Folded Spill
; LA32F-NEXT:    lu12i.w $s7, 262272
; LA32F-NEXT:    move $a0, $s1
; LA32F-NEXT:    move $a1, $s2
; LA32F-NEXT:    move $a2, $zero
; LA32F-NEXT:    move $a3, $s7
; LA32F-NEXT:    bl %plt(__adddf3)
; LA32F-NEXT:    st.w $a0, $sp, 12 # 4-byte Folded Spill
; LA32F-NEXT:    move $s2, $a1
; LA32F-NEXT:    lu12i.w $a3, 262400
; LA32F-NEXT:    move $a0, $s5
; LA32F-NEXT:    move $a1, $s6
; LA32F-NEXT:    move $a2, $zero
; LA32F-NEXT:    bl %plt(__adddf3)
; LA32F-NEXT:    move $s5, $a0
; LA32F-NEXT:    move $s6, $a1
; LA32F-NEXT:    move $a0, $s3
; LA32F-NEXT:    move $a1, $s4
; LA32F-NEXT:    move $a2, $zero
; LA32F-NEXT:    lu12i.w $a3, 261888
; LA32F-NEXT:    bl %plt(__adddf3)
; LA32F-NEXT:    move $s3, $a0
; LA32F-NEXT:    move $s4, $a1
; LA32F-NEXT:    move $a0, $s8
; LA32F-NEXT:    ld.w $a1, $sp, 16 # 4-byte Folded Reload
; LA32F-NEXT:    move $a2, $zero
; LA32F-NEXT:    move $a3, $s0
; LA32F-NEXT:    bl %plt(__adddf3)
; LA32F-NEXT:    move $s8, $a0
; LA32F-NEXT:    move $s0, $a1
; LA32F-NEXT:    ld.w $a0, $sp, 32 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $a1, $sp, 28 # 4-byte Folded Reload
; LA32F-NEXT:    move $a2, $zero
; LA32F-NEXT:    move $a3, $s7
; LA32F-NEXT:    bl %plt(__adddf3)
; LA32F-NEXT:    move $s7, $a0
; LA32F-NEXT:    move $s1, $a1
; LA32F-NEXT:    ld.w $a0, $sp, 48 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $a1, $sp, 44 # 4-byte Folded Reload
; LA32F-NEXT:    move $a2, $zero
; LA32F-NEXT:    lu12i.w $a3, 262400
; LA32F-NEXT:    bl %plt(__adddf3)
; LA32F-NEXT:    st.w $a0, $fp, 56
; LA32F-NEXT:    st.w $a1, $fp, 60
; LA32F-NEXT:    st.w $s7, $fp, 48
; LA32F-NEXT:    st.w $s1, $fp, 52
; LA32F-NEXT:    st.w $s8, $fp, 40
; LA32F-NEXT:    st.w $s0, $fp, 44
; LA32F-NEXT:    st.w $s3, $fp, 32
; LA32F-NEXT:    st.w $s4, $fp, 36
; LA32F-NEXT:    st.w $s5, $fp, 24
; LA32F-NEXT:    st.w $s6, $fp, 28
; LA32F-NEXT:    ld.w $a0, $sp, 12 # 4-byte Folded Reload
; LA32F-NEXT:    st.w $a0, $fp, 16
; LA32F-NEXT:    st.w $s2, $fp, 20
; LA32F-NEXT:    ld.w $a0, $sp, 24 # 4-byte Folded Reload
; LA32F-NEXT:    st.w $a0, $fp, 8
; LA32F-NEXT:    ld.w $a0, $sp, 20 # 4-byte Folded Reload
; LA32F-NEXT:    st.w $a0, $fp, 12
; LA32F-NEXT:    ld.w $a0, $sp, 40 # 4-byte Folded Reload
; LA32F-NEXT:    st.w $a0, $fp, 0
; LA32F-NEXT:    ld.w $a0, $sp, 36 # 4-byte Folded Reload
; LA32F-NEXT:    st.w $a0, $fp, 4
; LA32F-NEXT:    ld.w $s8, $sp, 52 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s7, $sp, 56 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s6, $sp, 60 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s5, $sp, 64 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s4, $sp, 68 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s3, $sp, 72 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s2, $sp, 76 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s1, $sp, 80 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $s0, $sp, 84 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $fp, $sp, 88 # 4-byte Folded Reload
; LA32F-NEXT:    ld.w $ra, $sp, 92 # 4-byte Folded Reload
; LA32F-NEXT:    addi.w $sp, $sp, 96
; LA32F-NEXT:    ret
;
; LA32D-LABEL: test_d8:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $a2, $zero, 1
; LA32D-NEXT:    movgr2fr.w $fa0, $a2
; LA32D-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI6_0)
; LA32D-NEXT:    fld.d $fa1, $a2, %pc_lo12(.LCPI6_0)
; LA32D-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI6_1)
; LA32D-NEXT:    fld.d $fa2, $a2, %pc_lo12(.LCPI6_1)
; LA32D-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI6_2)
; LA32D-NEXT:    fld.d $fa3, $a2, %pc_lo12(.LCPI6_2)
; LA32D-NEXT:    fld.d $fa4, $a0, 56
; LA32D-NEXT:    fld.d $fa5, $a0, 48
; LA32D-NEXT:    fld.d $fa6, $a0, 24
; LA32D-NEXT:    fld.d $fa7, $a0, 16
; LA32D-NEXT:    fld.d $ft0, $a0, 8
; LA32D-NEXT:    fld.d $ft1, $a0, 0
; LA32D-NEXT:    fld.d $ft2, $a0, 32
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    fcvt.d.s $fa0, $fa0
; LA32D-NEXT:    fadd.d $ft1, $ft1, $fa0
; LA32D-NEXT:    fadd.d $fa0, $ft2, $fa0
; LA32D-NEXT:    fld.d $ft2, $a0, 40
; LA32D-NEXT:    fadd.d $ft0, $ft0, $fa1
; LA32D-NEXT:    fadd.d $fa7, $fa7, $fa2
; LA32D-NEXT:    fadd.d $fa6, $fa6, $fa3
; LA32D-NEXT:    fadd.d $fa1, $ft2, $fa1
; LA32D-NEXT:    fadd.d $fa2, $fa5, $fa2
; LA32D-NEXT:    fadd.d $fa3, $fa4, $fa3
; LA32D-NEXT:    fst.d $fa3, $a1, 56
; LA32D-NEXT:    fst.d $fa2, $a1, 48
; LA32D-NEXT:    fst.d $fa1, $a1, 40
; LA32D-NEXT:    fst.d $fa6, $a1, 24
; LA32D-NEXT:    fst.d $fa7, $a1, 16
; LA32D-NEXT:    fst.d $ft0, $a1, 8
; LA32D-NEXT:    fst.d $fa0, $a1, 32
; LA32D-NEXT:    fst.d $ft1, $a1, 0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: test_d8:
; LA64F:       # %bb.0:
; LA64F-NEXT:    addi.d $sp, $sp, -112
; LA64F-NEXT:    st.d $ra, $sp, 104 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $fp, $sp, 96 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $s0, $sp, 88 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $s1, $sp, 80 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $s2, $sp, 72 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $s3, $sp, 64 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $s4, $sp, 56 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $s5, $sp, 48 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $s6, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $s7, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT:    st.d $s8, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT:    ld.d $a2, $a0, 56
; LA64F-NEXT:    st.d $a2, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT:    ld.d $s1, $a0, 40
; LA64F-NEXT:    ld.d $s2, $a0, 32
; LA64F-NEXT:    ld.d $s3, $a0, 24
; LA64F-NEXT:    ld.d $s4, $a0, 8
; LA64F-NEXT:    ld.d $s5, $a0, 0
; LA64F-NEXT:    ld.d $s6, $a0, 48
; LA64F-NEXT:    ld.d $a0, $a0, 16
; LA64F-NEXT:    move $fp, $a1
; LA64F-NEXT:    ori $a1, $zero, 0
; LA64F-NEXT:    lu32i.d $a1, -524288
; LA64F-NEXT:    lu52i.d $s7, $a1, 1024
; LA64F-NEXT:    move $a1, $s7
; LA64F-NEXT:    bl %plt(__adddf3)
; LA64F-NEXT:    st.d $a0, $sp, 8 # 8-byte Folded Spill
; LA64F-NEXT:    move $a0, $s6
; LA64F-NEXT:    move $a1, $s7
; LA64F-NEXT:    bl %plt(__adddf3)
; LA64F-NEXT:    move $s6, $a0
; LA64F-NEXT:    lu52i.d $s7, $zero, 1023
; LA64F-NEXT:    move $a0, $s5
; LA64F-NEXT:    move $a1, $s7
; LA64F-NEXT:    bl %plt(__adddf3)
; LA64F-NEXT:    move $s5, $a0
; LA64F-NEXT:    lu52i.d $s0, $zero, 1024
; LA64F-NEXT:    move $a0, $s4
; LA64F-NEXT:    move $a1, $s0
; LA64F-NEXT:    bl %plt(__adddf3)
; LA64F-NEXT:    move $s4, $a0
; LA64F-NEXT:    lu52i.d $s8, $zero, 1025
; LA64F-NEXT:    move $a0, $s3
; LA64F-NEXT:    move $a1, $s8
; LA64F-NEXT:    bl %plt(__adddf3)
; LA64F-NEXT:    move $s3, $a0
; LA64F-NEXT:    move $a0, $s2
; LA64F-NEXT:    move $a1, $s7
; LA64F-NEXT:    bl %plt(__adddf3)
; LA64F-NEXT:    move $s2, $a0
; LA64F-NEXT:    move $a0, $s1
; LA64F-NEXT:    move $a1, $s0
; LA64F-NEXT:    bl %plt(__adddf3)
; LA64F-NEXT:    move $s0, $a0
; LA64F-NEXT:    ld.d $a0, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT:    move $a1, $s8
; LA64F-NEXT:    bl %plt(__adddf3)
; LA64F-NEXT:    st.d $a0, $fp, 56
; LA64F-NEXT:    st.d $s0, $fp, 40
; LA64F-NEXT:    st.d $s2, $fp, 32
; LA64F-NEXT:    st.d $s3, $fp, 24
; LA64F-NEXT:    st.d $s4, $fp, 8
; LA64F-NEXT:    st.d $s5, $fp, 0
; LA64F-NEXT:    st.d $s6, $fp, 48
; LA64F-NEXT:    ld.d $a0, $sp, 8 # 8-byte Folded Reload
; LA64F-NEXT:    st.d $a0, $fp, 16
; LA64F-NEXT:    ld.d $s8, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $s7, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $s6, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $s5, $sp, 48 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $s4, $sp, 56 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $s3, $sp, 64 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $s2, $sp, 72 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $s1, $sp, 80 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $s0, $sp, 88 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $fp, $sp, 96 # 8-byte Folded Reload
; LA64F-NEXT:    ld.d $ra, $sp, 104 # 8-byte Folded Reload
; LA64F-NEXT:    addi.d $sp, $sp, 112
; LA64F-NEXT:    ret
;
; LA64D-LABEL: test_d8:
; LA64D:       # %bb.0:
; LA64D-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI6_0)
; LA64D-NEXT:    vld $vr0, $a2, %pc_lo12(.LCPI6_0)
; LA64D-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI6_1)
; LA64D-NEXT:    vld $vr1, $a2, %pc_lo12(.LCPI6_1)
; LA64D-NEXT:    vld $vr2, $a0, 16
; LA64D-NEXT:    vld $vr3, $a0, 0
; LA64D-NEXT:    vld $vr4, $a0, 48
; LA64D-NEXT:    vld $vr5, $a0, 32
; LA64D-NEXT:    vfadd.d $vr2, $vr2, $vr0
; LA64D-NEXT:    vfadd.d $vr3, $vr3, $vr1
; LA64D-NEXT:    vfadd.d $vr0, $vr4, $vr0
; LA64D-NEXT:    vfadd.d $vr1, $vr5, $vr1
; LA64D-NEXT:    vst $vr1, $a1, 32
; LA64D-NEXT:    vst $vr0, $a1, 48
; LA64D-NEXT:    vst $vr3, $a1, 0
; LA64D-NEXT:    vst $vr2, $a1, 16
; LA64D-NEXT:    ret
  %p = load %d8, ptr %P
  %R = fadd %d8 %p, < double 1.000000e+00, double 2.000000e+00, double 3.000000e+00, double 4.000000e+00, double 1.000000e+00, double 2.000000e+00, double 3.000000e+00, double 4.000000e+00 >
  store %d8 %R, ptr %S
  ret void
}