|  | ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py | 
|  | ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,FULLFP16 | 
|  | ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,MVEFP | 
|  | ; RUN: llc -early-live-intervals -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,FULLFP16 | 
|  | ; RUN: llc -early-live-intervals -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,MVEFP | 
|  |  | 
; <4 x float> sqrt with the fast flag: the expected output scalarizes into four
; vsqrt.f32, one per lane. FULLFP16 and MVEFP emit identical code here, so only
; the common CHECK prefix carries assertions.
define arm_aapcs_vfpcc <4 x float> @sqrt_float32_t(<4 x float> %src) {
; CHECK-LABEL: sqrt_float32_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vsqrt.f32 s3, s3
; CHECK-NEXT:    vsqrt.f32 s2, s2
; CHECK-NEXT:    vsqrt.f32 s1, s1
; CHECK-NEXT:    vsqrt.f32 s0, s0
; CHECK-NEXT:    bx lr
entry:
%0 = call fast <4 x float> @llvm.sqrt.v4f32(<4 x float> %src)
ret <4 x float> %0
}
|  |  | 
; <8 x half> sqrt: each pair of f16 lanes shares an s-register, so the expected
; code extracts the top half with vmovx.f16, applies vsqrt.f16 to both halves,
; and reinserts the top result with vins.f16.
define arm_aapcs_vfpcc <8 x half> @sqrt_float16_t(<8 x half> %src) {
; CHECK-LABEL: sqrt_float16_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovx.f16 s4, s0
; CHECK-NEXT:    vsqrt.f16 s0, s0
; CHECK-NEXT:    vsqrt.f16 s4, s4
; CHECK-NEXT:    vins.f16 s0, s4
; CHECK-NEXT:    vmovx.f16 s4, s1
; CHECK-NEXT:    vsqrt.f16 s4, s4
; CHECK-NEXT:    vsqrt.f16 s1, s1
; CHECK-NEXT:    vins.f16 s1, s4
; CHECK-NEXT:    vmovx.f16 s4, s2
; CHECK-NEXT:    vsqrt.f16 s4, s4
; CHECK-NEXT:    vsqrt.f16 s2, s2
; CHECK-NEXT:    vins.f16 s2, s4
; CHECK-NEXT:    vmovx.f16 s4, s3
; CHECK-NEXT:    vsqrt.f16 s4, s4
; CHECK-NEXT:    vsqrt.f16 s3, s3
; CHECK-NEXT:    vins.f16 s3, s4
; CHECK-NEXT:    bx lr
entry:
%0 = call fast <8 x half> @llvm.sqrt.v8f16(<8 x half> %src)
ret <8 x half> %0
}
|  |  | 
; <2 x double> sqrt: each double lane is moved to core registers and computed
; via a libcall to sqrt; callee-saved q4 (d8/d9) keeps the vector live across
; the two calls.
define arm_aapcs_vfpcc <2 x double> @sqrt_float64_t(<2 x double> %src) {
; CHECK-LABEL: sqrt_float64_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r1, d9
; CHECK-NEXT:    bl sqrt
; CHECK-NEXT:    vmov r2, r3, d8
; CHECK-NEXT:    vmov d9, r0, r1
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    mov r1, r3
; CHECK-NEXT:    bl sqrt
; CHECK-NEXT:    vmov d8, r0, r1
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <2 x double> @llvm.sqrt.v2f64(<2 x double> %src)
ret <2 x double> %0
}
|  |  | 
; <4 x float> cos: scalarized into four cosf libcalls; results are gathered
; into s16-s19 (callee-saved d8/d9) so they survive the remaining calls.
define arm_aapcs_vfpcc <4 x float> @cos_float32_t(<4 x float> %src) {
; CHECK-LABEL: cos_float32_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r4, d9
; CHECK-NEXT:    bl cosf
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl cosf
; CHECK-NEXT:    vmov r4, r1, d8
; CHECK-NEXT:    vmov s19, r0
; CHECK-NEXT:    vmov s18, r5
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl cosf
; CHECK-NEXT:    vmov s17, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl cosf
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
%0 = call fast <4 x float> @llvm.cos.v4f32(<4 x float> %src)
ret <4 x float> %0
}
|  |  | 
; <8 x half> cos: each f16 lane is widened to f32 (vcvtb/vcvtt.f32.f16),
; passed to cosf, and the result narrowed back (vcvtb/vcvtt.f16.f32) into q5;
; q4 holds the source across the eight libcalls.
define arm_aapcs_vfpcc <8 x half> @cos_float16_t(<8 x half> %src) {
; CHECK-LABEL: cos_float16_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s16
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl cosf
; CHECK-NEXT:    vcvtt.f32.f16 s0, s16
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl cosf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s20, s16
; CHECK-NEXT:    vcvtt.f16.f32 s20, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl cosf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s21, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl cosf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s21, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl cosf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s22, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl cosf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s22, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl cosf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s23, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl cosf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s23, s0
; CHECK-NEXT:    vmov q0, q5
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <8 x half> @llvm.cos.v8f16(<8 x half> %src)
ret <8 x half> %0
}
|  |  | 
; <2 x double> cos: two cos libcalls, one per double lane, with d8/d9
; preserving the vector halves across the calls.
define arm_aapcs_vfpcc <2 x double> @cos_float64_t(<2 x double> %src) {
; CHECK-LABEL: cos_float64_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r1, d9
; CHECK-NEXT:    bl cos
; CHECK-NEXT:    vmov r2, r3, d8
; CHECK-NEXT:    vmov d9, r0, r1
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    mov r1, r3
; CHECK-NEXT:    bl cos
; CHECK-NEXT:    vmov d8, r0, r1
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <2 x double> @llvm.cos.v2f64(<2 x double> %src)
ret <2 x double> %0
}
|  |  | 
; <4 x float> sin: scalarized into four sinf libcalls; results accumulate in
; s16-s19 (callee-saved d8/d9) across the calls.
define arm_aapcs_vfpcc <4 x float> @sin_float32_t(<4 x float> %src) {
; CHECK-LABEL: sin_float32_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r4, d9
; CHECK-NEXT:    bl sinf
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl sinf
; CHECK-NEXT:    vmov r4, r1, d8
; CHECK-NEXT:    vmov s19, r0
; CHECK-NEXT:    vmov s18, r5
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl sinf
; CHECK-NEXT:    vmov s17, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl sinf
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
%0 = call fast <4 x float> @llvm.sin.v4f32(<4 x float> %src)
ret <4 x float> %0
}
|  |  | 
; <8 x half> sin: per-lane widen to f32, sinf libcall, narrow back into q5;
; same vcvtb/vcvtt shuffle pattern as the other f16 libcall tests.
define arm_aapcs_vfpcc <8 x half> @sin_float16_t(<8 x half> %src) {
; CHECK-LABEL: sin_float16_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s16
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl sinf
; CHECK-NEXT:    vcvtt.f32.f16 s0, s16
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl sinf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s20, s16
; CHECK-NEXT:    vcvtt.f16.f32 s20, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl sinf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s21, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl sinf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s21, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl sinf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s22, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl sinf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s22, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl sinf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s23, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl sinf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s23, s0
; CHECK-NEXT:    vmov q0, q5
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <8 x half> @llvm.sin.v8f16(<8 x half> %src)
ret <8 x half> %0
}
|  |  | 
; <2 x double> sin: two sin libcalls, one per double lane, with d8/d9
; preserving the vector halves across the calls.
define arm_aapcs_vfpcc <2 x double> @sin_float64_t(<2 x double> %src) {
; CHECK-LABEL: sin_float64_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r1, d9
; CHECK-NEXT:    bl sin
; CHECK-NEXT:    vmov r2, r3, d8
; CHECK-NEXT:    vmov d9, r0, r1
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    mov r1, r3
; CHECK-NEXT:    bl sin
; CHECK-NEXT:    vmov d8, r0, r1
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <2 x double> @llvm.sin.v2f64(<2 x double> %src)
ret <2 x double> %0
}
|  |  | 
; <4 x float> tan: scalarized into four tanf libcalls; results accumulate in
; s16-s19 (callee-saved d8/d9) across the calls.
define arm_aapcs_vfpcc <4 x float> @tan_float32_t(<4 x float> %src) {
; CHECK-LABEL: tan_float32_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r4, d9
; CHECK-NEXT:    bl tanf
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl tanf
; CHECK-NEXT:    vmov r4, r1, d8
; CHECK-NEXT:    vmov s19, r0
; CHECK-NEXT:    vmov s18, r5
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl tanf
; CHECK-NEXT:    vmov s17, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl tanf
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
%0 = call fast <4 x float> @llvm.tan.v4f32(<4 x float> %src)
ret <4 x float> %0
}
|  |  | 
; <8 x half> tan: per-lane widen to f32, tanf libcall, narrow back into q5;
; same vcvtb/vcvtt shuffle pattern as the other f16 libcall tests.
define arm_aapcs_vfpcc <8 x half> @tan_float16_t(<8 x half> %src) {
; CHECK-LABEL: tan_float16_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s16
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl tanf
; CHECK-NEXT:    vcvtt.f32.f16 s0, s16
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl tanf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s20, s16
; CHECK-NEXT:    vcvtt.f16.f32 s20, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl tanf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s21, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl tanf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s21, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl tanf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s22, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl tanf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s22, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl tanf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s23, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl tanf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s23, s0
; CHECK-NEXT:    vmov q0, q5
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <8 x half> @llvm.tan.v8f16(<8 x half> %src)
ret <8 x half> %0
}
|  |  | 
; <2 x double> tan: two tan libcalls, one per double lane, with d8/d9
; preserving the vector halves across the calls.
define arm_aapcs_vfpcc <2 x double> @tan_float64_t(<2 x double> %src) {
; CHECK-LABEL: tan_float64_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r1, d9
; CHECK-NEXT:    bl tan
; CHECK-NEXT:    vmov r2, r3, d8
; CHECK-NEXT:    vmov d9, r0, r1
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    mov r1, r3
; CHECK-NEXT:    bl tan
; CHECK-NEXT:    vmov d8, r0, r1
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <2 x double> @llvm.tan.v2f64(<2 x double> %src)
ret <2 x double> %0
}
|  |  | 
; <4 x float> exp: scalarized into four expf libcalls; results accumulate in
; s16-s19 (callee-saved d8/d9) across the calls.
define arm_aapcs_vfpcc <4 x float> @exp_float32_t(<4 x float> %src) {
; CHECK-LABEL: exp_float32_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r4, d9
; CHECK-NEXT:    bl expf
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl expf
; CHECK-NEXT:    vmov r4, r1, d8
; CHECK-NEXT:    vmov s19, r0
; CHECK-NEXT:    vmov s18, r5
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl expf
; CHECK-NEXT:    vmov s17, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl expf
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
%0 = call fast <4 x float> @llvm.exp.v4f32(<4 x float> %src)
ret <4 x float> %0
}
|  |  | 
; <8 x half> exp: per-lane widen to f32, expf libcall, narrow back into q5;
; same vcvtb/vcvtt shuffle pattern as the other f16 libcall tests.
define arm_aapcs_vfpcc <8 x half> @exp_float16_t(<8 x half> %src) {
; CHECK-LABEL: exp_float16_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s16
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl expf
; CHECK-NEXT:    vcvtt.f32.f16 s0, s16
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl expf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s20, s16
; CHECK-NEXT:    vcvtt.f16.f32 s20, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl expf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s21, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl expf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s21, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl expf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s22, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl expf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s22, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl expf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s23, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl expf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s23, s0
; CHECK-NEXT:    vmov q0, q5
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <8 x half> @llvm.exp.v8f16(<8 x half> %src)
ret <8 x half> %0
}
|  |  | 
; <2 x double> exp: two exp libcalls, one per double lane, with d8/d9
; preserving the vector halves across the calls.
define arm_aapcs_vfpcc <2 x double> @exp_float64_t(<2 x double> %src) {
; CHECK-LABEL: exp_float64_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r1, d9
; CHECK-NEXT:    bl exp
; CHECK-NEXT:    vmov r2, r3, d8
; CHECK-NEXT:    vmov d9, r0, r1
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    mov r1, r3
; CHECK-NEXT:    bl exp
; CHECK-NEXT:    vmov d8, r0, r1
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <2 x double> @llvm.exp.v2f64(<2 x double> %src)
ret <2 x double> %0
}
|  |  | 
; <4 x float> exp2: scalarized into four exp2f libcalls; results accumulate in
; s16-s19 (callee-saved d8/d9) across the calls.
define arm_aapcs_vfpcc <4 x float> @exp2_float32_t(<4 x float> %src) {
; CHECK-LABEL: exp2_float32_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r4, d9
; CHECK-NEXT:    bl exp2f
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl exp2f
; CHECK-NEXT:    vmov r4, r1, d8
; CHECK-NEXT:    vmov s19, r0
; CHECK-NEXT:    vmov s18, r5
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl exp2f
; CHECK-NEXT:    vmov s17, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl exp2f
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
%0 = call fast <4 x float> @llvm.exp2.v4f32(<4 x float> %src)
ret <4 x float> %0
}
|  |  | 
; <8 x half> exp2: per-lane widen to f32, exp2f libcall, narrow back into q5;
; same vcvtb/vcvtt shuffle pattern as the other f16 libcall tests.
define arm_aapcs_vfpcc <8 x half> @exp2_float16_t(<8 x half> %src) {
; CHECK-LABEL: exp2_float16_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s16
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl exp2f
; CHECK-NEXT:    vcvtt.f32.f16 s0, s16
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl exp2f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s20, s16
; CHECK-NEXT:    vcvtt.f16.f32 s20, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl exp2f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s21, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl exp2f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s21, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl exp2f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s22, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl exp2f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s22, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl exp2f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s23, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl exp2f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s23, s0
; CHECK-NEXT:    vmov q0, q5
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <8 x half> @llvm.exp2.v8f16(<8 x half> %src)
ret <8 x half> %0
}
|  |  | 
; <2 x double> exp2: two exp2 libcalls, one per double lane, with d8/d9
; preserving the vector halves across the calls.
define arm_aapcs_vfpcc <2 x double> @exp2_float64_t(<2 x double> %src) {
; CHECK-LABEL: exp2_float64_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r1, d9
; CHECK-NEXT:    bl exp2
; CHECK-NEXT:    vmov r2, r3, d8
; CHECK-NEXT:    vmov d9, r0, r1
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    mov r1, r3
; CHECK-NEXT:    bl exp2
; CHECK-NEXT:    vmov d8, r0, r1
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <2 x double> @llvm.exp2.v2f64(<2 x double> %src)
ret <2 x double> %0
}
|  |  | 
; <4 x float> log: scalarized into four logf libcalls; results accumulate in
; s16-s19 (callee-saved d8/d9) across the calls.
define arm_aapcs_vfpcc <4 x float> @log_float32_t(<4 x float> %src) {
; CHECK-LABEL: log_float32_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r4, d9
; CHECK-NEXT:    bl logf
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl logf
; CHECK-NEXT:    vmov r4, r1, d8
; CHECK-NEXT:    vmov s19, r0
; CHECK-NEXT:    vmov s18, r5
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl logf
; CHECK-NEXT:    vmov s17, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl logf
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
%0 = call fast <4 x float> @llvm.log.v4f32(<4 x float> %src)
ret <4 x float> %0
}
|  |  | 
; <8 x half> log: per-lane widen to f32, logf libcall, narrow back into q5;
; same vcvtb/vcvtt shuffle pattern as the other f16 libcall tests.
define arm_aapcs_vfpcc <8 x half> @log_float16_t(<8 x half> %src) {
; CHECK-LABEL: log_float16_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s16
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl logf
; CHECK-NEXT:    vcvtt.f32.f16 s0, s16
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl logf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s20, s16
; CHECK-NEXT:    vcvtt.f16.f32 s20, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl logf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s21, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl logf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s21, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl logf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s22, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl logf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s22, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl logf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s23, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl logf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s23, s0
; CHECK-NEXT:    vmov q0, q5
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <8 x half> @llvm.log.v8f16(<8 x half> %src)
ret <8 x half> %0
}
|  |  | 
; <2 x double> log: two log libcalls, one per double lane, with d8/d9
; preserving the vector halves across the calls.
define arm_aapcs_vfpcc <2 x double> @log_float64_t(<2 x double> %src) {
; CHECK-LABEL: log_float64_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r1, d9
; CHECK-NEXT:    bl log
; CHECK-NEXT:    vmov r2, r3, d8
; CHECK-NEXT:    vmov d9, r0, r1
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    mov r1, r3
; CHECK-NEXT:    bl log
; CHECK-NEXT:    vmov d8, r0, r1
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <2 x double> @llvm.log.v2f64(<2 x double> %src)
ret <2 x double> %0
}
|  |  | 
; <4 x float> log2: scalarized into four log2f libcalls; results accumulate in
; s16-s19 (callee-saved d8/d9) across the calls.
define arm_aapcs_vfpcc <4 x float> @log2_float32_t(<4 x float> %src) {
; CHECK-LABEL: log2_float32_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r4, d9
; CHECK-NEXT:    bl log2f
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl log2f
; CHECK-NEXT:    vmov r4, r1, d8
; CHECK-NEXT:    vmov s19, r0
; CHECK-NEXT:    vmov s18, r5
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl log2f
; CHECK-NEXT:    vmov s17, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl log2f
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
%0 = call fast <4 x float> @llvm.log2.v4f32(<4 x float> %src)
ret <4 x float> %0
}
|  |  | 
; <8 x half> log2: per-lane widen to f32, log2f libcall, narrow back into q5;
; same vcvtb/vcvtt shuffle pattern as the other f16 libcall tests.
define arm_aapcs_vfpcc <8 x half> @log2_float16_t(<8 x half> %src) {
; CHECK-LABEL: log2_float16_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s16
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl log2f
; CHECK-NEXT:    vcvtt.f32.f16 s0, s16
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl log2f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s20, s16
; CHECK-NEXT:    vcvtt.f16.f32 s20, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl log2f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s21, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl log2f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s21, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl log2f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s22, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl log2f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s22, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl log2f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s23, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl log2f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s23, s0
; CHECK-NEXT:    vmov q0, q5
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <8 x half> @llvm.log2.v8f16(<8 x half> %src)
ret <8 x half> %0
}
|  |  | 
; MVE has no f64 arithmetic: the two double lanes are scalarised into two
; calls to the log2 libcall, with the input vector preserved in q4 across calls.
define arm_aapcs_vfpcc <2 x double> @log2_float64_t(<2 x double> %src) {
; CHECK-LABEL: log2_float64_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r1, d9
; CHECK-NEXT:    bl log2
; CHECK-NEXT:    vmov r2, r3, d8
; CHECK-NEXT:    vmov d9, r0, r1
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    mov r1, r3
; CHECK-NEXT:    bl log2
; CHECK-NEXT:    vmov d8, r0, r1
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <2 x double> @llvm.log2.v2f64(<2 x double> %src)
ret <2 x double> %0
}
|  |  | 
; No vector log10 instruction: the four f32 lanes are scalarised into four
; log10f libcalls and the results are rebuilt lane-by-lane in q4.
define arm_aapcs_vfpcc <4 x float> @log10_float32_t(<4 x float> %src) {
; CHECK-LABEL: log10_float32_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r4, d9
; CHECK-NEXT:    bl log10f
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl log10f
; CHECK-NEXT:    vmov r4, r1, d8
; CHECK-NEXT:    vmov s19, r0
; CHECK-NEXT:    vmov s18, r5
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl log10f
; CHECK-NEXT:    vmov s17, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    bl log10f
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
%0 = call fast <4 x float> @llvm.log10.v4f32(<4 x float> %src)
ret <4 x float> %0
}
|  |  | 
; Same scalarisation pattern as log2_float16_t: each f16 lane is widened to
; f32, run through the log10f libcall, narrowed back, and packed into q5.
define arm_aapcs_vfpcc <8 x half> @log10_float16_t(<8 x half> %src) {
; CHECK-LABEL: log10_float16_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s16
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl log10f
; CHECK-NEXT:    vcvtt.f32.f16 s0, s16
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl log10f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s20, s16
; CHECK-NEXT:    vcvtt.f16.f32 s20, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl log10f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s21, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s17
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl log10f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s21, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl log10f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s22, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s18
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl log10f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s22, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl log10f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s23, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s19
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bl log10f
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s23, s0
; CHECK-NEXT:    vmov q0, q5
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <8 x half> @llvm.log10.v8f16(<8 x half> %src)
ret <8 x half> %0
}
|  |  | 
; f64 lanes are scalarised into two calls to the double-precision log10
; libcall, mirroring the log2_float64_t lowering.
define arm_aapcs_vfpcc <2 x double> @log10_float64_t(<2 x double> %src) {
; CHECK-LABEL: log10_float64_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, r1, d9
; CHECK-NEXT:    bl log10
; CHECK-NEXT:    vmov r2, r3, d8
; CHECK-NEXT:    vmov d9, r0, r1
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    mov r1, r3
; CHECK-NEXT:    bl log10
; CHECK-NEXT:    vmov d8, r0, r1
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <2 x double> @llvm.log10.v2f64(<2 x double> %src)
ret <2 x double> %0
}
|  |  | 
; Two-operand intrinsic: each of the four lane pairs (base from q0, exponent
; from q1) is fed to the powf libcall; results are rebuilt lane-by-lane in q4.
define arm_aapcs_vfpcc <4 x float> @pow_float32_t(<4 x float> %src1, <4 x float> %src2) {
; CHECK-LABEL: pow_float32_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r6, lr}
; CHECK-NEXT:    push {r4, r5, r6, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov q4, q1
; CHECK-NEXT:    vmov q5, q0
; CHECK-NEXT:    vmov r0, r4, d11
; CHECK-NEXT:    vmov r1, r5, d9
; CHECK-NEXT:    bl powf
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl powf
; CHECK-NEXT:    vmov r4, r2, d10
; CHECK-NEXT:    vmov r5, r1, d8
; CHECK-NEXT:    vmov s19, r0
; CHECK-NEXT:    vmov s18, r6
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    bl powf
; CHECK-NEXT:    vmov s17, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl powf
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    pop {r4, r5, r6, pc}
entry:
%0 = call fast <4 x float> @llvm.pow.v4f32(<4 x float> %src1, <4 x float> %src2)
ret <4 x float> %0
}
|  |  | 
; f16 pow: both source vectors are saved (q5 = bases, q4 = exponents); each
; lane pair is widened to f32, passed to powf, narrowed back, and packed into q6.
define arm_aapcs_vfpcc <8 x half> @pow_float16_t(<8 x half> %src1, <8 x half> %src2) {
; CHECK-LABEL: pow_float16_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT:    vmov q5, q0
; CHECK-NEXT:    vmov q4, q1
; CHECK-NEXT:    vcvtb.f32.f16 s0, s20
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s16
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    bl powf
; CHECK-NEXT:    vcvtt.f32.f16 s0, s20
; CHECK-NEXT:    vmov r2, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s16
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    vmov s16, r0
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    bl powf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s24, s16
; CHECK-NEXT:    vcvtt.f16.f32 s24, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s21
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s17
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    bl powf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s25, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s21
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s17
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    bl powf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s25, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s22
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s18
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    bl powf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s26, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s22
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s18
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    bl powf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s26, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s23
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s19
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    bl powf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtb.f16.f32 s27, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s23
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    vcvtt.f32.f16 s0, s19
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    bl powf
; CHECK-NEXT:    vmov s0, r0
; CHECK-NEXT:    vcvtt.f16.f32 s27, s0
; CHECK-NEXT:    vmov q0, q6
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <8 x half> @llvm.pow.v8f16(<8 x half> %src1, <8 x half> %src2)
ret <8 x half> %0
}
|  |  | 
; f64 pow: two calls to the double-precision pow libcall, one per lane, with
; both operand vectors kept live in q4/q5 across the calls.
define arm_aapcs_vfpcc <2 x double> @pow_float64_t(<2 x double> %src1, <2 x double> %src2) {
; CHECK-LABEL: pow_float64_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov q4, q1
; CHECK-NEXT:    vmov q5, q0
; CHECK-NEXT:    vmov r0, r1, d11
; CHECK-NEXT:    vmov r2, r3, d9
; CHECK-NEXT:    bl pow
; CHECK-NEXT:    vmov lr, r12, d10
; CHECK-NEXT:    vmov r2, r3, d8
; CHECK-NEXT:    vmov d9, r0, r1
; CHECK-NEXT:    mov r0, lr
; CHECK-NEXT:    mov r1, r12
; CHECK-NEXT:    bl pow
; CHECK-NEXT:    vmov d8, r0, r1
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <2 x double> @llvm.pow.v2f64(<2 x double> %src1, <2 x double> %src2)
ret <2 x double> %0
}
|  |  | 
; First test where the two run configurations diverge: FULLFP16 (integer MVE
; only) moves lanes to GPRs and inserts each sign bit with bfi; MVEFP lowers
; to a vector bitwise mask (clear sign in q0, isolate sign from q1, OR).
define arm_aapcs_vfpcc <4 x float> @copysign_float32_t(<4 x float> %src1, <4 x float> %src2) {
; FULLFP16-LABEL: copysign_float32_t:
; FULLFP16:       @ %bb.0: @ %entry
; FULLFP16-NEXT:    .save {r4, r5, r7, lr}
; FULLFP16-NEXT:    push {r4, r5, r7, lr}
; FULLFP16-NEXT:    vmov r12, r1, d2
; FULLFP16-NEXT:    vmov r2, lr, d3
; FULLFP16-NEXT:    vmov r3, r0, d0
; FULLFP16-NEXT:    vmov r4, r5, d1
; FULLFP16-NEXT:    lsrs r1, r1, #31
; FULLFP16-NEXT:    bfi r0, r1, #31, #1
; FULLFP16-NEXT:    lsrs r1, r2, #31
; FULLFP16-NEXT:    bfi r4, r1, #31, #1
; FULLFP16-NEXT:    lsr.w r1, lr, #31
; FULLFP16-NEXT:    bfi r5, r1, #31, #1
; FULLFP16-NEXT:    lsr.w r1, r12, #31
; FULLFP16-NEXT:    bfi r3, r1, #31, #1
; FULLFP16-NEXT:    vmov s2, r4
; FULLFP16-NEXT:    vmov s3, r5
; FULLFP16-NEXT:    vmov s1, r0
; FULLFP16-NEXT:    vmov s0, r3
; FULLFP16-NEXT:    pop {r4, r5, r7, pc}
;
; MVEFP-LABEL: copysign_float32_t:
; MVEFP:       @ %bb.0: @ %entry
; MVEFP-NEXT:    vmov.i32 q2, #0x80000000
; MVEFP-NEXT:    vbic.i32 q0, #0x80000000
; MVEFP-NEXT:    vand q1, q1, q2
; MVEFP-NEXT:    vorr q0, q0, q1
; MVEFP-NEXT:    bx lr
entry:
%0 = call fast <4 x float> @llvm.copysign.v4f32(<4 x float> %src1, <4 x float> %src2)
ret <4 x float> %0
}
|  |  | 
; f16 copysign. FULLFP16 spills the sign vector to the stack, reads each
; lane's sign byte, and selects between vabs/vneg per lane under an IT block;
; MVEFP again lowers to the compact vector bit-mask sequence.
define arm_aapcs_vfpcc <8 x half> @copysign_float16_t(<8 x half> %src1, <8 x half> %src2) {
; FULLFP16-LABEL: copysign_float16_t:
; FULLFP16:       @ %bb.0: @ %entry
; FULLFP16-NEXT:    .pad #32
; FULLFP16-NEXT:    sub sp, #32
; FULLFP16-NEXT:    vmovx.f16 s8, s4
; FULLFP16-NEXT:    vstr.16 s8, [sp, #24]
; FULLFP16-NEXT:    vstr.16 s4, [sp, #28]
; FULLFP16-NEXT:    vmovx.f16 s4, s5
; FULLFP16-NEXT:    vstr.16 s4, [sp, #16]
; FULLFP16-NEXT:    vmovx.f16 s4, s6
; FULLFP16-NEXT:    vstr.16 s5, [sp, #20]
; FULLFP16-NEXT:    vstr.16 s4, [sp, #8]
; FULLFP16-NEXT:    vmovx.f16 s4, s7
; FULLFP16-NEXT:    vstr.16 s6, [sp, #12]
; FULLFP16-NEXT:    vstr.16 s4, [sp]
; FULLFP16-NEXT:    vstr.16 s7, [sp, #4]
; FULLFP16-NEXT:    ldrb.w r0, [sp, #25]
; FULLFP16-NEXT:    vmovx.f16 s4, s0
; FULLFP16-NEXT:    vabs.f16 s4, s4
; FULLFP16-NEXT:    vneg.f16 s6, s4
; FULLFP16-NEXT:    lsls r0, r0, #24
; FULLFP16-NEXT:    it pl
; FULLFP16-NEXT:    vmovpl.f32 s6, s4
; FULLFP16-NEXT:    ldrb.w r0, [sp, #29]
; FULLFP16-NEXT:    vabs.f16 s4, s0
; FULLFP16-NEXT:    vneg.f16 s0, s4
; FULLFP16-NEXT:    lsls r0, r0, #24
; FULLFP16-NEXT:    it pl
; FULLFP16-NEXT:    vmovpl.f32 s0, s4
; FULLFP16-NEXT:    ldrb.w r0, [sp, #17]
; FULLFP16-NEXT:    vmovx.f16 s4, s1
; FULLFP16-NEXT:    vabs.f16 s4, s4
; FULLFP16-NEXT:    vins.f16 s0, s6
; FULLFP16-NEXT:    vneg.f16 s6, s4
; FULLFP16-NEXT:    lsls r0, r0, #24
; FULLFP16-NEXT:    it pl
; FULLFP16-NEXT:    vmovpl.f32 s6, s4
; FULLFP16-NEXT:    ldrb.w r0, [sp, #21]
; FULLFP16-NEXT:    vabs.f16 s4, s1
; FULLFP16-NEXT:    vneg.f16 s1, s4
; FULLFP16-NEXT:    lsls r0, r0, #24
; FULLFP16-NEXT:    it pl
; FULLFP16-NEXT:    vmovpl.f32 s1, s4
; FULLFP16-NEXT:    ldrb.w r0, [sp, #9]
; FULLFP16-NEXT:    vmovx.f16 s4, s2
; FULLFP16-NEXT:    vabs.f16 s4, s4
; FULLFP16-NEXT:    vins.f16 s1, s6
; FULLFP16-NEXT:    vneg.f16 s6, s4
; FULLFP16-NEXT:    lsls r0, r0, #24
; FULLFP16-NEXT:    it pl
; FULLFP16-NEXT:    vmovpl.f32 s6, s4
; FULLFP16-NEXT:    ldrb.w r0, [sp, #13]
; FULLFP16-NEXT:    vabs.f16 s4, s2
; FULLFP16-NEXT:    vneg.f16 s2, s4
; FULLFP16-NEXT:    lsls r0, r0, #24
; FULLFP16-NEXT:    it pl
; FULLFP16-NEXT:    vmovpl.f32 s2, s4
; FULLFP16-NEXT:    ldrb.w r0, [sp, #1]
; FULLFP16-NEXT:    vmovx.f16 s4, s3
; FULLFP16-NEXT:    vabs.f16 s4, s4
; FULLFP16-NEXT:    vins.f16 s2, s6
; FULLFP16-NEXT:    vneg.f16 s6, s4
; FULLFP16-NEXT:    lsls r0, r0, #24
; FULLFP16-NEXT:    it pl
; FULLFP16-NEXT:    vmovpl.f32 s6, s4
; FULLFP16-NEXT:    ldrb.w r0, [sp, #5]
; FULLFP16-NEXT:    vabs.f16 s4, s3
; FULLFP16-NEXT:    vneg.f16 s3, s4
; FULLFP16-NEXT:    lsls r0, r0, #24
; FULLFP16-NEXT:    it pl
; FULLFP16-NEXT:    vmovpl.f32 s3, s4
; FULLFP16-NEXT:    vins.f16 s3, s6
; FULLFP16-NEXT:    add sp, #32
; FULLFP16-NEXT:    bx lr
;
; MVEFP-LABEL: copysign_float16_t:
; MVEFP:       @ %bb.0: @ %entry
; MVEFP-NEXT:    vmov.i16 q2, #0x8000
; MVEFP-NEXT:    vbic.i16 q0, #0x8000
; MVEFP-NEXT:    vand q1, q1, q2
; MVEFP-NEXT:    vorr q0, q0, q1
; MVEFP-NEXT:    bx lr
entry:
%0 = call fast <8 x half> @llvm.copysign.v8f16(<8 x half> %src1, <8 x half> %src2)
ret <8 x half> %0
}
|  |  | 
; f64 copysign in GPRs: only the high word of each double carries the sign,
; so each lane's sign bit is shifted down and bfi'd into the destination's
; high word. (The repeated writes to r0 are the codegen captured verbatim by
; the update script; only the final r0 value is used.)
define arm_aapcs_vfpcc <2 x double> @copysign_float64_t(<2 x double> %src1, <2 x double> %src2) {
; CHECK-LABEL: copysign_float64_t:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    vmov r0, r1, d3
; CHECK-NEXT:    vmov r0, lr, d2
; CHECK-NEXT:    vmov r0, r3, d1
; CHECK-NEXT:    vmov r12, r2, d0
; CHECK-NEXT:    lsrs r1, r1, #31
; CHECK-NEXT:    bfi r3, r1, #31, #1
; CHECK-NEXT:    lsr.w r1, lr, #31
; CHECK-NEXT:    bfi r2, r1, #31, #1
; CHECK-NEXT:    vmov d1, r0, r3
; CHECK-NEXT:    vmov d0, r12, r2
; CHECK-NEXT:    pop {r7, pc}
entry:
%0 = call fast <2 x double> @llvm.copysign.v2f64(<2 x double> %src1, <2 x double> %src2)
ret <2 x double> %0
}
|  |  | 
; Declarations of the vector math intrinsics exercised by the tests above,
; grouped by element type: v4f32, v8f16, v2f64.
declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
declare <4 x float> @llvm.cos.v4f32(<4 x float>)
declare <4 x float> @llvm.sin.v4f32(<4 x float>)
declare <4 x float> @llvm.exp.v4f32(<4 x float>)
declare <4 x float> @llvm.exp2.v4f32(<4 x float>)
declare <4 x float> @llvm.log.v4f32(<4 x float>)
declare <4 x float> @llvm.log2.v4f32(<4 x float>)
declare <4 x float> @llvm.log10.v4f32(<4 x float>)
declare <4 x float> @llvm.pow.v4f32(<4 x float>, <4 x float>)
declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>)
declare <8 x half> @llvm.sqrt.v8f16(<8 x half>)
declare <8 x half> @llvm.cos.v8f16(<8 x half>)
declare <8 x half> @llvm.sin.v8f16(<8 x half>)
declare <8 x half> @llvm.exp.v8f16(<8 x half>)
declare <8 x half> @llvm.exp2.v8f16(<8 x half>)
declare <8 x half> @llvm.log.v8f16(<8 x half>)
declare <8 x half> @llvm.log2.v8f16(<8 x half>)
declare <8 x half> @llvm.log10.v8f16(<8 x half>)
declare <8 x half> @llvm.pow.v8f16(<8 x half>, <8 x half>)
declare <8 x half> @llvm.copysign.v8f16(<8 x half>, <8 x half>)
declare <2 x double> @llvm.sqrt.v2f64(<2 x double>)
declare <2 x double> @llvm.cos.v2f64(<2 x double>)
declare <2 x double> @llvm.sin.v2f64(<2 x double>)
declare <2 x double> @llvm.exp.v2f64(<2 x double>)
declare <2 x double> @llvm.exp2.v2f64(<2 x double>)
declare <2 x double> @llvm.log.v2f64(<2 x double>)
declare <2 x double> @llvm.log2.v2f64(<2 x double>)
declare <2 x double> @llvm.log10.v2f64(<2 x double>)
declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>)
declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>)