| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma | FileCheck %s --check-prefixes=FMA,FMA-INFS |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma4,+fma | FileCheck %s --check-prefixes=FMA4,FMA4-INFS |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma4 | FileCheck %s --check-prefixes=FMA4,FMA4-INFS |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512-INFS |
| |
| ; |
| ; Pattern: (fadd (fmul x, y), z) -> (fmadd x,y,z) |
| ; |
| |
; Scalar f32: contract-flagged fmul+fadd must select one fused multiply-add
; (vfmadd213ss on FMA/AVX512, vfmaddss on FMA4), not separate mul+add.
define float @test_f32_fmadd(float %a0, float %a1, float %a2) {
; FMA-LABEL: test_f32_fmadd:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f32_fmadd:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f32_fmadd:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; AVX512-NEXT: retq
  %x = fmul contract float %a0, %a1
  %res = fadd contract float %x, %a2
  ret float %res
}
| |
; <4 x float>: contract fmul+fadd folds to a single packed FMA (vfmadd213ps / vfmaddps).
define <4 x float> @test_4f32_fmadd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; FMA-LABEL: test_4f32_fmadd:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f32_fmadd:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f32_fmadd:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; AVX512-NEXT: retq
  %x = fmul contract <4 x float> %a0, %a1
  %res = fadd contract <4 x float> %x, %a2
  ret <4 x float> %res
}
| |
; <8 x float> (256-bit): same fold as the 128-bit case, operating on ymm registers.
define <8 x float> @test_8f32_fmadd(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
; FMA-LABEL: test_8f32_fmadd:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_8f32_fmadd:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_8f32_fmadd:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; AVX512-NEXT: retq
  %x = fmul contract <8 x float> %a0, %a1
  %res = fadd contract <8 x float> %x, %a2
  ret <8 x float> %res
}
| |
; Scalar f64: contract fmul+fadd folds to vfmadd213sd / vfmaddsd.
define double @test_f64_fmadd(double %a0, double %a1, double %a2) {
; FMA-LABEL: test_f64_fmadd:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f64_fmadd:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddsd {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f64_fmadd:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; AVX512-NEXT: retq
  %x = fmul contract double %a0, %a1
  %res = fadd contract double %x, %a2
  ret double %res
}
| |
; <2 x double>: contract fmul+fadd folds to packed vfmadd213pd / vfmaddpd.
define <2 x double> @test_2f64_fmadd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
; FMA-LABEL: test_2f64_fmadd:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_2f64_fmadd:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddpd {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_2f64_fmadd:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; AVX512-NEXT: retq
  %x = fmul contract <2 x double> %a0, %a1
  %res = fadd contract <2 x double> %x, %a2
  ret <2 x double> %res
}
| |
; <4 x double> (256-bit): packed double FMA fold on ymm registers.
define <4 x double> @test_4f64_fmadd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
; FMA-LABEL: test_4f64_fmadd:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f64_fmadd:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddpd {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f64_fmadd:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; AVX512-NEXT: retq
  %x = fmul contract <4 x double> %a0, %a1
  %res = fadd contract <4 x double> %x, %a2
  ret <4 x double> %res
}
| |
| ; |
| ; Pattern: (fsub (fmul x, y), z) -> (fmsub x, y, z) |
| ; |
| |
; Scalar f32: contract fmul followed by fsub of the addend folds to vfmsub213ss / vfmsubss.
define float @test_f32_fmsub(float %a0, float %a1, float %a2) {
; FMA-LABEL: test_f32_fmsub:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f32_fmsub:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubss {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f32_fmsub:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
; AVX512-NEXT: retq
  %x = fmul contract float %a0, %a1
  %res = fsub contract float %x, %a2
  ret float %res
}
| |
; <4 x float>: fmsub fold -> packed vfmsub213ps / vfmsubps.
define <4 x float> @test_4f32_fmsub(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; FMA-LABEL: test_4f32_fmsub:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f32_fmsub:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f32_fmsub:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
; AVX512-NEXT: retq
  %x = fmul contract <4 x float> %a0, %a1
  %res = fsub contract <4 x float> %x, %a2
  ret <4 x float> %res
}
| |
; <8 x float> (256-bit): fmsub fold on ymm registers.
define <8 x float> @test_8f32_fmsub(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
; FMA-LABEL: test_8f32_fmsub:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_8f32_fmsub:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps {{.*#+}} ymm0 = (ymm0 * ymm1) - ymm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_8f32_fmsub:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2
; AVX512-NEXT: retq
  %x = fmul contract <8 x float> %a0, %a1
  %res = fsub contract <8 x float> %x, %a2
  ret <8 x float> %res
}
| |
; Scalar f64: fmsub fold -> vfmsub213sd / vfmsubsd.
define double @test_f64_fmsub(double %a0, double %a1, double %a2) {
; FMA-LABEL: test_f64_fmsub:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f64_fmsub:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubsd {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f64_fmsub:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
; AVX512-NEXT: retq
  %x = fmul contract double %a0, %a1
  %res = fsub contract double %x, %a2
  ret double %res
}
| |
; <2 x double>: fmsub fold -> packed vfmsub213pd / vfmsubpd.
define <2 x double> @test_2f64_fmsub(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
; FMA-LABEL: test_2f64_fmsub:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_2f64_fmsub:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubpd {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_2f64_fmsub:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
; AVX512-NEXT: retq
  %x = fmul contract <2 x double> %a0, %a1
  %res = fsub contract <2 x double> %x, %a2
  ret <2 x double> %res
}
| |
; <4 x double> (256-bit): fmsub fold on ymm registers.
define <4 x double> @test_4f64_fmsub(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
; FMA-LABEL: test_4f64_fmsub:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f64_fmsub:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubpd {{.*#+}} ymm0 = (ymm0 * ymm1) - ymm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f64_fmsub:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2
; AVX512-NEXT: retq
  %x = fmul contract <4 x double> %a0, %a1
  %res = fsub contract <4 x double> %x, %a2
  ret <4 x double> %res
}
| |
| ; |
| ; Pattern: (fsub z, (fmul x, y)) -> (fnmadd x, y, z) |
| ; |
| |
; Scalar f32: z - (x*y) with contract folds to negated-multiply add (vfnmadd213ss / vfnmaddss).
define float @test_f32_fnmadd(float %a0, float %a1, float %a2) {
; FMA-LABEL: test_f32_fnmadd:
; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f32_fnmadd:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddss {{.*#+}} xmm0 = -(xmm0 * xmm1) + xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f32_fnmadd:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
; AVX512-NEXT: retq
  %x = fmul contract float %a0, %a1
  %res = fsub contract float %a2, %x
  ret float %res
}
| |
; <4 x float>: fnmadd fold -> packed vfnmadd213ps / vfnmaddps.
define <4 x float> @test_4f32_fnmadd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; FMA-LABEL: test_4f32_fnmadd:
; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f32_fnmadd:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddps {{.*#+}} xmm0 = -(xmm0 * xmm1) + xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f32_fnmadd:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
; AVX512-NEXT: retq
  %x = fmul contract <4 x float> %a0, %a1
  %res = fsub contract <4 x float> %a2, %x
  ret <4 x float> %res
}
| |
; <8 x float> (256-bit): fnmadd fold on ymm registers.
define <8 x float> @test_8f32_fnmadd(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
; FMA-LABEL: test_8f32_fnmadd:
; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_8f32_fnmadd:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddps {{.*#+}} ymm0 = -(ymm0 * ymm1) + ymm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_8f32_fnmadd:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2
; AVX512-NEXT: retq
  %x = fmul contract <8 x float> %a0, %a1
  %res = fsub contract <8 x float> %a2, %x
  ret <8 x float> %res
}
| |
; Scalar f64: fnmadd fold -> vfnmadd213sd / vfnmaddsd.
define double @test_f64_fnmadd(double %a0, double %a1, double %a2) {
; FMA-LABEL: test_f64_fnmadd:
; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f64_fnmadd:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddsd {{.*#+}} xmm0 = -(xmm0 * xmm1) + xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f64_fnmadd:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
; AVX512-NEXT: retq
  %x = fmul contract double %a0, %a1
  %res = fsub contract double %a2, %x
  ret double %res
}
| |
; <2 x double>: fnmadd fold -> packed vfnmadd213pd / vfnmaddpd.
define <2 x double> @test_2f64_fnmadd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
; FMA-LABEL: test_2f64_fnmadd:
; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_2f64_fnmadd:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddpd {{.*#+}} xmm0 = -(xmm0 * xmm1) + xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_2f64_fnmadd:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
; AVX512-NEXT: retq
  %x = fmul contract <2 x double> %a0, %a1
  %res = fsub contract <2 x double> %a2, %x
  ret <2 x double> %res
}
| |
; <4 x double> (256-bit): fnmadd fold on ymm registers.
define <4 x double> @test_4f64_fnmadd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
; FMA-LABEL: test_4f64_fnmadd:
; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f64_fnmadd:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddpd {{.*#+}} ymm0 = -(ymm0 * ymm1) + ymm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f64_fnmadd:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2
; AVX512-NEXT: retq
  %x = fmul contract <4 x double> %a0, %a1
  %res = fsub contract <4 x double> %a2, %x
  ret <4 x double> %res
}
| |
| ; |
| ; Pattern: (fsub (fneg (fmul x, y)), z) -> (fnmsub x, y, z) |
| ; |
| |
; Scalar f32: (-(x*y)) - z, built via fsub from -0.0, folds to vfnmsub213ss / vfnmsubss.
define float @test_f32_fnmsub(float %a0, float %a1, float %a2) {
; FMA-LABEL: test_f32_fnmsub:
; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f32_fnmsub:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubss {{.*#+}} xmm0 = -(xmm0 * xmm1) - xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f32_fnmsub:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; AVX512-NEXT: retq
  %x = fmul contract float %a0, %a1
  %y = fsub contract float -0.000000e+00, %x
  %res = fsub contract float %y, %a2
  ret float %res
}
| |
; <4 x float>: fnmsub fold (negation via fsub from -0.0 splat) -> vfnmsub213ps / vfnmsubps.
define <4 x float> @test_4f32_fnmsub(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; FMA-LABEL: test_4f32_fnmsub:
; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f32_fnmsub:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubps {{.*#+}} xmm0 = -(xmm0 * xmm1) - xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f32_fnmsub:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; AVX512-NEXT: retq
  %x = fmul contract <4 x float> %a0, %a1
  %y = fsub contract <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x
  %res = fsub contract <4 x float> %y, %a2
  ret <4 x float> %res
}
| |
; <8 x float> (256-bit): fnmsub fold on ymm registers.
define <8 x float> @test_8f32_fnmsub(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
; FMA-LABEL: test_8f32_fnmsub:
; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_8f32_fnmsub:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubps {{.*#+}} ymm0 = -(ymm0 * ymm1) - ymm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_8f32_fnmsub:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2
; AVX512-NEXT: retq
  %x = fmul contract <8 x float> %a0, %a1
  %y = fsub contract <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x
  %res = fsub contract <8 x float> %y, %a2
  ret <8 x float> %res
}
| |
; Scalar f64: fnmsub fold -> vfnmsub213sd / vfnmsubsd.
define double @test_f64_fnmsub(double %a0, double %a1, double %a2) {
; FMA-LABEL: test_f64_fnmsub:
; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f64_fnmsub:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubsd {{.*#+}} xmm0 = -(xmm0 * xmm1) - xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f64_fnmsub:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; AVX512-NEXT: retq
  %x = fmul contract double %a0, %a1
  %y = fsub contract double -0.000000e+00, %x
  %res = fsub contract double %y, %a2
  ret double %res
}
| |
; <2 x double>: fnmsub fold -> packed vfnmsub213pd / vfnmsubpd.
define <2 x double> @test_2f64_fnmsub(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
; FMA-LABEL: test_2f64_fnmsub:
; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_2f64_fnmsub:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubpd {{.*#+}} xmm0 = -(xmm0 * xmm1) - xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_2f64_fnmsub:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; AVX512-NEXT: retq
  %x = fmul contract <2 x double> %a0, %a1
  %y = fsub contract <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x
  %res = fsub contract <2 x double> %y, %a2
  ret <2 x double> %res
}
| |
; <4 x double> (256-bit): fnmsub fold on ymm registers.
define <4 x double> @test_4f64_fnmsub(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
; FMA-LABEL: test_4f64_fnmsub:
; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f64_fnmsub:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubpd {{.*#+}} ymm0 = -(ymm0 * ymm1) - ymm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f64_fnmsub:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2
; AVX512-NEXT: retq
  %x = fmul contract <4 x double> %a0, %a1
  %y = fsub contract <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %x
  %res = fsub contract <4 x double> %y, %a2
  ret <4 x double> %res
}
| |
| ; |
| ; Load Folding Patterns |
| ; |
| |
; Load folding: the multiplicand loaded from memory folds into the FMA's memory
; operand (note the 132 form is chosen so the load can be the second source).
define <4 x float> @test_4f32_fmadd_load(ptr %a0, <4 x float> %a1, <4 x float> %a2) {
; FMA-LABEL: test_4f32_fmadd_load:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f32_fmadd_load:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f32_fmadd_load:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1
; AVX512-NEXT: retq
  %x = load <4 x float>, ptr %a0
  %y = fmul contract <4 x float> %x, %a1
  %res = fadd contract <4 x float> %y, %a2
  ret <4 x float> %res
}
| |
; Load folding for fmsub: loaded multiplicand becomes the memory operand of vfmsub132pd / vfmsubpd.
define <2 x double> @test_2f64_fmsub_load(ptr %a0, <2 x double> %a1, <2 x double> %a2) {
; FMA-LABEL: test_2f64_fmsub_load:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub132pd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_2f64_fmsub_load:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubpd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_2f64_fmsub_load:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub132pd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1
; AVX512-NEXT: retq
  %x = load <2 x double>, ptr %a0
  %y = fmul contract <2 x double> %x, %a1
  %res = fsub contract <2 x double> %y, %a2
  ret <2 x double> %res
}
| |
| ; |
| ; Patterns (+ fneg variants): mul(add(1.0,x),y), mul(sub(1.0,x),y), mul(sub(x,1.0),y) |
| ; |
| |
; Without ninf, (x + 1.0) * y must NOT become fma(x, y, y): the CHECK lines
; expect a separate add + mul (the fold is only legal when infinities are excluded).
define <4 x float> @test_v4f32_mul_add_x_one_y(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_add_x_one_y:
; FMA: # %bb.0:
; FMA-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_add_x_one_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_add_x_one_y:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %a = fadd contract <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
  %m = fmul contract <4 x float> %a, %y
  ret <4 x float> %m
}
| |
; With ninf, (x + 1.0) * y folds to x*y + y: a single FMA with y as both multiplier and addend.
define <4 x float> @test_v4f32_mul_add_x_one_y_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_add_x_one_y_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_add_x_one_y_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_add_x_one_y_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm1
; AVX512-NEXT: retq
  %a = fadd contract ninf <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
  %m = fmul contract ninf <4 x float> %a, %y
  ret <4 x float> %m
}
| |
; Commuted operand order (y * (x + 1.0)); without ninf, still add + mul, no FMA fold.
define <4 x float> @test_v4f32_mul_y_add_x_one(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_add_x_one:
; FMA: # %bb.0:
; FMA-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_add_x_one:
; FMA4: # %bb.0:
; FMA4-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_add_x_one:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
  %a = fadd contract <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
  %m = fmul contract <4 x float> %y, %a
  ret <4 x float> %m
}
| |
; Commuted variant with ninf: still folds to a single FMA, x*y + y.
define <4 x float> @test_v4f32_mul_y_add_x_one_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_add_x_one_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_add_x_one_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_add_x_one_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm1
; AVX512-NEXT: retq
  %a = fadd contract ninf <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
  %m = fmul contract ninf <4 x float> %y, %a
  ret <4 x float> %m
}
| |
; Same as test_v4f32_mul_y_add_x_one but with poison lanes in the splat-of-1.0;
; the constant must still be recognized and the no-fold behavior preserved.
define <4 x float> @test_v4f32_mul_y_add_x_one_poisons(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_add_x_one_poisons:
; FMA: # %bb.0:
; FMA-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_add_x_one_poisons:
; FMA4: # %bb.0:
; FMA4-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_add_x_one_poisons:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
  %a = fadd contract <4 x float> %x, <float 1.0, float poison, float 1.0, float poison>
  %m = fmul contract <4 x float> %y, %a
  ret <4 x float> %m
}
| |
; Poison-lane splat of 1.0 with ninf: the fold to a single FMA (x*y + y) must
; still fire even when some constant lanes are poison.
; NOTE: added the conventional space in "ninf <4 x float>" to match every other
; ninf test in this file (e.g. test_v4f32_mul_add_x_one_y_ninf); whitespace is
; insignificant to the IR parser, so behavior is unchanged.
define <4 x float> @test_v4f32_mul_y_add_x_one_poisons_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_add_x_one_poisons_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_add_x_one_poisons_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_add_x_one_poisons_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm1
; AVX512-NEXT: retq
  %a = fadd contract ninf <4 x float> %x, <float 1.0, float poison, float 1.0, float poison>
  %m = fmul contract ninf <4 x float> %y, %a
  ret <4 x float> %m
}
| |
; Without ninf, (x - 1.0) * y (written as add of -1.0) must stay add + mul.
define <4 x float> @test_v4f32_mul_add_x_negone_y(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_add_x_negone_y:
; FMA: # %bb.0:
; FMA-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_add_x_negone_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_add_x_negone_y:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %a = fadd contract <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
  %m = fmul contract <4 x float> %a, %y
  ret <4 x float> %m
}
| |
; With ninf, (x + -1.0) * y folds to x*y - y: a single fused multiply-subtract.
; NOTE: added the conventional space in "ninf <4 x float>" for consistency with
; the other ninf tests in this file; whitespace-only, no behavior change.
define <4 x float> @test_v4f32_mul_add_x_negone_y_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_add_x_negone_y_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_add_x_negone_y_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_add_x_negone_y_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm1
; AVX512-NEXT: retq
  %a = fadd contract ninf <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
  %m = fmul contract ninf <4 x float> %a, %y
  ret <4 x float> %m
}
| |
; Commuted operand order (y * (x + -1.0)); without ninf, still add + mul.
define <4 x float> @test_v4f32_mul_y_add_x_negone(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_add_x_negone:
; FMA: # %bb.0:
; FMA-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_add_x_negone:
; FMA4: # %bb.0:
; FMA4-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_add_x_negone:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
  %a = fadd contract <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
  %m = fmul contract <4 x float> %y, %a
  ret <4 x float> %m
}
| |
; Commuted variant with ninf: folds to a single fmsub, x*y - y.
; NOTE: added the conventional space in "ninf <4 x float>" for consistency with
; the other ninf tests in this file; whitespace-only, no behavior change.
define <4 x float> @test_v4f32_mul_y_add_x_negone_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_add_x_negone_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_add_x_negone_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_add_x_negone_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm1
; AVX512-NEXT: retq
  %a = fadd contract ninf <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
  %m = fmul contract ninf <4 x float> %y, %a
  ret <4 x float> %m
}
| |
; Poison lanes in the -1.0 splat; without ninf the no-fold behavior is unchanged.
define <4 x float> @test_v4f32_mul_y_add_x_negone_poisons(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_add_x_negone_poisons:
; FMA: # %bb.0:
; FMA-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_add_x_negone_poisons:
; FMA4: # %bb.0:
; FMA4-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_add_x_negone_poisons:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
  %a = fadd contract <4 x float> %x, <float poison, float -1.0, float poison, float -1.0>
  %m = fmul contract <4 x float> %y, %a
  ret <4 x float> %m
}
| |
; Poison-lane -1.0 splat with ninf: the fmsub fold (x*y - y) must still fire.
; NOTE: added the conventional space in "ninf <4 x float>" for consistency with
; the other ninf tests in this file; whitespace-only, no behavior change.
define <4 x float> @test_v4f32_mul_y_add_x_negone_poisons_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_add_x_negone_poisons_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_add_x_negone_poisons_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_add_x_negone_poisons_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm1
; AVX512-NEXT: retq
  %a = fadd contract ninf <4 x float> %x, <float poison, float -1.0, float poison, float -1.0>
  %m = fmul contract ninf <4 x float> %y, %a
  ret <4 x float> %m
}
| |
; Without ninf, (1.0 - x) * y must stay broadcast + sub + mul; no fnmadd fold.
define <4 x float> @test_v4f32_mul_sub_one_x_y(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_sub_one_x_y:
; FMA: # %bb.0:
; FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_sub_one_x_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; FMA4-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_sub_one_x_y:
; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; AVX512-NEXT: vsubps %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %s = fsub contract <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
  %m = fmul contract <4 x float> %s, %y
  ret <4 x float> %m
}
| |
; (1.0 - x) * y with ninf; per the current CHECK lines this operand order is
; still emitted as broadcast + sub + mul (contrast with the commuted
; test_v4f32_mul_y_sub_one_x_ninf below, which does fold to vfnmadd).
; NOTE: added the conventional space in "ninf <4 x float>" for consistency with
; the other ninf tests in this file; whitespace-only, no behavior change.
define <4 x float> @test_v4f32_mul_sub_one_x_y_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_sub_one_x_y_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_sub_one_x_y_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; FMA4-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_sub_one_x_y_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; AVX512-NEXT: vsubps %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %s = fsub contract ninf <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
  %m = fmul contract ninf <4 x float> %s, %y
  ret <4 x float> %m
}
| |
; Commuted order y * (1.0 - x); without ninf, stays broadcast + sub + mul.
define <4 x float> @test_v4f32_mul_y_sub_one_x(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_one_x:
; FMA: # %bb.0:
; FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_one_x:
; FMA4: # %bb.0:
; FMA4-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; FMA4-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_one_x:
; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; AVX512-NEXT: vsubps %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
  %s = fsub contract <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
  %m = fmul contract <4 x float> %y, %s
  ret <4 x float> %m
}
| |
; With ninf, y * (1.0 - x) folds to -(x*y) + y: a single vfnmadd.
; NOTE: added the conventional space in "ninf <4 x float>" for consistency with
; the other ninf tests in this file; whitespace-only, no behavior change.
define <4 x float> @test_v4f32_mul_y_sub_one_x_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_one_x_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_one_x_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddps {{.*#+}} xmm0 = -(xmm0 * xmm1) + xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_one_x_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm1
; AVX512-NEXT: retq
  %s = fsub contract ninf <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
  %m = fmul contract ninf <4 x float> %y, %s
  ret <4 x float> %m
}
| |
; Same as test_v4f32_mul_y_sub_one_x but with a poison lane in the splat
; constant: still no FMA formed without 'ninf', and the poison lane is
; treated as part of the 1.0 splat (same codegen as the non-poison test).
define <4 x float> @test_v4f32_mul_y_sub_one_x_poisons(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_one_x_poisons:
; FMA: # %bb.0:
; FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_one_x_poisons:
; FMA4: # %bb.0:
; FMA4-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; FMA4-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_one_x_poisons:
; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; AVX512-NEXT: vsubps %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%s = fsub contract <4 x float> <float 1.0, float poison, float 1.0, float 1.0>, %x
%m = fmul contract <4 x float> %y, %s
ret <4 x float> %m
}
| |
; Poison-lane variant of test_v4f32_mul_y_sub_one_x_ninf: the splat-with-
; poison constant still allows the fnmadd combine: -(x * y) + y.
; Flag spelling normalized: 'ninf<4 x float>' -> 'ninf <4 x float>' to match
; the rest of the file (no CHECK change).
define <4 x float> @test_v4f32_mul_y_sub_one_x_poisons_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_one_x_poisons_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_one_x_poisons_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddps {{.*#+}} xmm0 = -(xmm0 * xmm1) + xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_one_x_poisons_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm1
; AVX512-NEXT: retq
%s = fsub contract ninf <4 x float> <float 1.0, float poison, float 1.0, float 1.0>, %x
%m = fmul contract ninf <4 x float> %y, %s
ret <4 x float> %m
}
| |
; Negative test: without 'ninf', (-1.0 - x) * y is not combined into an FMA;
; all targets keep the explicit broadcast + vsubps + vmulps sequence.
define <4 x float> @test_v4f32_mul_sub_negone_x_y(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_sub_negone_x_y:
; FMA: # %bb.0:
; FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [-1.0E+0,-1.0E+0,-1.0E+0,-1.0E+0]
; FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_sub_negone_x_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vbroadcastss {{.*#+}} xmm2 = [-1.0E+0,-1.0E+0,-1.0E+0,-1.0E+0]
; FMA4-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_sub_negone_x_y:
; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [-1.0E+0,-1.0E+0,-1.0E+0,-1.0E+0]
; AVX512-NEXT: vsubps %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%s = fsub contract <4 x float> <float -1.0, float -1.0, float -1.0, float -1.0>, %x
%m = fmul contract <4 x float> %s, %y
ret <4 x float> %m
}
| |
; Note: even with 'ninf', (-1.0 - x) * y (mul with the sub on the LHS) is not
; combined here — the CHECK lines expect the explicit sub + mul. Compare
; test_v4f32_mul_y_sub_negone_x_ninf, where the commuted form does combine.
; Flag spelling normalized: 'ninf<4 x float>' -> 'ninf <4 x float>' to match
; the rest of the file (no CHECK change).
define <4 x float> @test_v4f32_mul_sub_negone_x_y_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_sub_negone_x_y_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [-1.0E+0,-1.0E+0,-1.0E+0,-1.0E+0]
; FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_sub_negone_x_y_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vbroadcastss {{.*#+}} xmm2 = [-1.0E+0,-1.0E+0,-1.0E+0,-1.0E+0]
; FMA4-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_sub_negone_x_y_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [-1.0E+0,-1.0E+0,-1.0E+0,-1.0E+0]
; AVX512-NEXT: vsubps %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%s = fsub contract ninf <4 x float> <float -1.0, float -1.0, float -1.0, float -1.0>, %x
%m = fmul contract ninf <4 x float> %s, %y
ret <4 x float> %m
}
| |
; Negative test: without 'ninf', y * (-1.0 - x) is not combined into an FMA;
; all targets keep the explicit broadcast + vsubps + vmulps sequence.
define <4 x float> @test_v4f32_mul_y_sub_negone_x(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_negone_x:
; FMA: # %bb.0:
; FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [-1.0E+0,-1.0E+0,-1.0E+0,-1.0E+0]
; FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_negone_x:
; FMA4: # %bb.0:
; FMA4-NEXT: vbroadcastss {{.*#+}} xmm2 = [-1.0E+0,-1.0E+0,-1.0E+0,-1.0E+0]
; FMA4-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_negone_x:
; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [-1.0E+0,-1.0E+0,-1.0E+0,-1.0E+0]
; AVX512-NEXT: vsubps %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%s = fsub contract <4 x float> <float -1.0, float -1.0, float -1.0, float -1.0>, %x
%m = fmul contract <4 x float> %y, %s
ret <4 x float> %m
}
| |
; With 'ninf', y * (-1.0 - x) = -y - x*y is combined into a single fnmsub:
; -(x * y) - y.
; Flag spelling normalized: 'ninf<4 x float>' -> 'ninf <4 x float>' to match
; the rest of the file (no CHECK change).
define <4 x float> @test_v4f32_mul_y_sub_negone_x_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_negone_x_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_negone_x_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubps {{.*#+}} xmm0 = -(xmm0 * xmm1) - xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_negone_x_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm1
; AVX512-NEXT: retq
%s = fsub contract ninf <4 x float> <float -1.0, float -1.0, float -1.0, float -1.0>, %x
%m = fmul contract ninf <4 x float> %y, %s
ret <4 x float> %m
}
| |
; Poison-lane variant of test_v4f32_mul_y_sub_negone_x: the splat-with-poison
; constant behaves like the full -1.0 splat; no FMA without 'ninf'.
define <4 x float> @test_v4f32_mul_y_sub_negone_x_poisons(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_negone_x_poisons:
; FMA: # %bb.0:
; FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [-1.0E+0,-1.0E+0,-1.0E+0,-1.0E+0]
; FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_negone_x_poisons:
; FMA4: # %bb.0:
; FMA4-NEXT: vbroadcastss {{.*#+}} xmm2 = [-1.0E+0,-1.0E+0,-1.0E+0,-1.0E+0]
; FMA4-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_negone_x_poisons:
; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [-1.0E+0,-1.0E+0,-1.0E+0,-1.0E+0]
; AVX512-NEXT: vsubps %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%s = fsub contract <4 x float> <float -1.0, float -1.0, float poison, float -1.0>, %x
%m = fmul contract <4 x float> %y, %s
ret <4 x float> %m
}
| |
; Poison-lane variant of test_v4f32_mul_y_sub_negone_x_ninf: still combines
; into a single fnmsub: -(x * y) - y.
; Flag spelling normalized: 'ninf<4 x float>' -> 'ninf <4 x float>' to match
; the rest of the file (no CHECK change).
define <4 x float> @test_v4f32_mul_y_sub_negone_x_poisons_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_negone_x_poisons_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_negone_x_poisons_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubps {{.*#+}} xmm0 = -(xmm0 * xmm1) - xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_negone_x_poisons_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm1
; AVX512-NEXT: retq
%s = fsub contract ninf <4 x float> <float -1.0, float -1.0, float poison, float -1.0>, %x
%m = fmul contract ninf <4 x float> %y, %s
ret <4 x float> %m
}
| |
; Negative test: without 'ninf', (x - 1.0) * y is not combined into an FMA.
; The subtract is lowered as x + (-1.0) from a constant-pool splat (AVX512
; uses an embedded broadcast {1to4}), followed by an explicit vmulps.
define <4 x float> @test_v4f32_mul_sub_x_one_y(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_sub_x_one_y:
; FMA: # %bb.0:
; FMA-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_sub_x_one_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_sub_x_one_y:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%s = fsub contract <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
%m = fmul contract <4 x float> %s, %y
ret <4 x float> %m
}
| |
; With 'ninf', (x - 1.0) * y = x*y - y is combined into a single fmsub:
; (x * y) - y.
; Flag spelling normalized: 'ninf<4 x float>' -> 'ninf <4 x float>' to match
; the rest of the file (no CHECK change).
define <4 x float> @test_v4f32_mul_sub_x_one_y_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_sub_x_one_y_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_sub_x_one_y_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_sub_x_one_y_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm1
; AVX512-NEXT: retq
%s = fsub contract ninf <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
%m = fmul contract ninf <4 x float> %s, %y
ret <4 x float> %m
}
| |
; Negative test: without 'ninf', y * (x - 1.0) is not combined into an FMA;
; the subtract is lowered as x + (-1.0) from the constant pool, then vmulps.
define <4 x float> @test_v4f32_mul_y_sub_x_one(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_x_one:
; FMA: # %bb.0:
; FMA-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_x_one:
; FMA4: # %bb.0:
; FMA4-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_x_one:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%s = fsub contract <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
%m = fmul contract <4 x float> %y, %s
ret <4 x float> %m
}
| |
; With 'ninf', y * (x - 1.0) = x*y - y is combined into a single fmsub:
; (x * y) - y.
; Flag spelling normalized: 'ninf<4 x float>' -> 'ninf <4 x float>' to match
; the rest of the file (no CHECK change).
define <4 x float> @test_v4f32_mul_y_sub_x_one_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_x_one_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_x_one_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_x_one_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm1
; AVX512-NEXT: retq
%s = fsub contract ninf <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
%m = fmul contract ninf <4 x float> %y, %s
ret <4 x float> %m
}
| |
; Poison-lane variant of test_v4f32_mul_y_sub_x_one: same codegen as the
; non-poison test; no FMA without 'ninf'.
define <4 x float> @test_v4f32_mul_y_sub_x_one_poisons(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_x_one_poisons:
; FMA: # %bb.0:
; FMA-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_x_one_poisons:
; FMA4: # %bb.0:
; FMA4-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_x_one_poisons:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%s = fsub contract <4 x float> %x, <float 1.0, float 1.0, float 1.0, float poison>
%m = fmul contract <4 x float> %y, %s
ret <4 x float> %m
}
| |
; Poison-lane variant of test_v4f32_mul_y_sub_x_one_ninf: still combines into
; a single fmsub: (x * y) - y.
; Flag spelling normalized: 'ninf<4 x float>' -> 'ninf <4 x float>' to match
; the rest of the file (no CHECK change).
define <4 x float> @test_v4f32_mul_y_sub_x_one_poisons_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_x_one_poisons_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_x_one_poisons_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_x_one_poisons_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm1
; AVX512-NEXT: retq
%s = fsub contract ninf <4 x float> %x, <float 1.0, float 1.0, float 1.0, float poison>
%m = fmul contract ninf <4 x float> %y, %s
ret <4 x float> %m
}
| |
; Negative test: without 'ninf', (x - (-1.0)) * y is not combined into an
; FMA; the subtract is lowered as x + 1.0 from the constant pool, then vmulps.
define <4 x float> @test_v4f32_mul_sub_x_negone_y(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_sub_x_negone_y:
; FMA: # %bb.0:
; FMA-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_sub_x_negone_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_sub_x_negone_y:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%s = fsub contract <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
%m = fmul contract <4 x float> %s, %y
ret <4 x float> %m
}
| |
; With 'ninf', (x + 1.0) * y = x*y + y is combined into a single fmadd:
; (x * y) + y.
; Flag spelling normalized: 'ninf<4 x float>' -> 'ninf <4 x float>' to match
; the rest of the file (no CHECK change).
define <4 x float> @test_v4f32_mul_sub_x_negone_y_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_sub_x_negone_y_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_sub_x_negone_y_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_sub_x_negone_y_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm1
; AVX512-NEXT: retq
%s = fsub contract ninf <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
%m = fmul contract ninf <4 x float> %s, %y
ret <4 x float> %m
}
| |
; Negative test: without 'ninf', y * (x - (-1.0)) is not combined into an
; FMA; the subtract is lowered as x + 1.0 from the constant pool, then vmulps.
define <4 x float> @test_v4f32_mul_y_sub_x_negone(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_x_negone:
; FMA: # %bb.0:
; FMA-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_x_negone:
; FMA4: # %bb.0:
; FMA4-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_x_negone:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%s = fsub contract <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
%m = fmul contract <4 x float> %y, %s
ret <4 x float> %m
}
| |
; With 'ninf', y * (x + 1.0) = x*y + y is combined into a single fmadd:
; (x * y) + y.
; Flag spelling normalized: 'ninf<4 x float>' -> 'ninf <4 x float>' to match
; the rest of the file (no CHECK change).
define <4 x float> @test_v4f32_mul_y_sub_x_negone_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_x_negone_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_x_negone_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_x_negone_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm1
; AVX512-NEXT: retq
%s = fsub contract ninf <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
%m = fmul contract ninf <4 x float> %y, %s
ret <4 x float> %m
}
| |
; Poison-lane variant of test_v4f32_mul_y_sub_x_negone: same codegen as the
; non-poison test; no FMA without 'ninf'.
define <4 x float> @test_v4f32_mul_y_sub_x_negone_poisons(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_x_negone_poisons:
; FMA: # %bb.0:
; FMA-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_x_negone_poisons:
; FMA4: # %bb.0:
; FMA4-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_x_negone_poisons:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%s = fsub contract <4 x float> %x, <float poison, float -1.0, float -1.0, float -1.0>
%m = fmul contract <4 x float> %y, %s
ret <4 x float> %m
}
| |
; Poison-lane variant of test_v4f32_mul_y_sub_x_negone_ninf: still combines
; into a single fmadd: (x * y) + y.
; Flag spelling normalized: 'ninf<4 x float>' -> 'ninf <4 x float>' to match
; the rest of the file (no CHECK change).
define <4 x float> @test_v4f32_mul_y_sub_x_negone_poisons_ninf(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_x_negone_poisons_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_x_negone_poisons_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_x_negone_poisons_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm1
; AVX512-NEXT: retq
%s = fsub contract ninf <4 x float> %x, <float poison, float -1.0, float -1.0, float -1.0>
%m = fmul contract ninf <4 x float> %y, %s
ret <4 x float> %m
}
| |
| |
; Linear interpolation: r = x*t + y*(1-t). Without 'ninf' the (1-t) subtract
; stays explicit; only the final mul+add contracts into one fmadd.
define float @test_f32_interp(float %x, float %y, float %t) {
; FMA-LABEL: test_f32_interp:
; FMA: # %bb.0:
; FMA-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; FMA-NEXT: vsubss %xmm2, %xmm3, %xmm3
; FMA-NEXT: vmulss %xmm3, %xmm1, %xmm1
; FMA-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm2 * xmm0) + xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f32_interp:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; FMA4-NEXT: vsubss %xmm2, %xmm3, %xmm3
; FMA4-NEXT: vmulss %xmm3, %xmm1, %xmm1
; FMA4-NEXT: vfmaddss {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f32_interp:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX512-NEXT: vsubss %xmm2, %xmm3, %xmm3
; AVX512-NEXT: vmulss %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm2 * xmm0) + xmm1
; AVX512-NEXT: retq
%t1 = fsub contract nsz float 1.0, %t
%tx = fmul contract nsz float %x, %t
%ty = fmul contract nsz float %y, %t1
%r = fadd contract nsz float %tx, %ty
ret float %r
}
| |
; Linear interpolation with 'ninf': y*(1-t) is distributed so the whole
; expression lowers to two chained fmsubs (no explicit 1.0 constant or sub).
define float @test_f32_interp_ninf(float %x, float %y, float %t) {
; FMA-LABEL: test_f32_interp_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ss {{.*#+}} xmm1 = (xmm2 * xmm1) - xmm1
; FMA-NEXT: vfmsub213ss {{.*#+}} xmm0 = (xmm2 * xmm0) - xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f32_interp_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubss {{.*#+}} xmm1 = (xmm2 * xmm1) - xmm1
; FMA4-NEXT: vfmsubss {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f32_interp_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ss {{.*#+}} xmm1 = (xmm2 * xmm1) - xmm1
; AVX512-NEXT: vfmsub213ss {{.*#+}} xmm0 = (xmm2 * xmm0) - xmm1
; AVX512-NEXT: retq
%t1 = fsub contract ninf nsz float 1.0, %t
%tx = fmul contract ninf nsz float %x, %t
%ty = fmul contract ninf nsz float %y, %t1
%r = fadd contract ninf nsz float %tx, %ty
ret float %r
}
| |
; v4f32 linear interpolation: r = x*t + y*(1-t). Without 'ninf' the (1-t)
; subtract stays explicit; only the final mul+add contracts into one fmadd.
define <4 x float> @test_v4f32_interp(<4 x float> %x, <4 x float> %y, <4 x float> %t) {
; FMA-LABEL: test_v4f32_interp:
; FMA: # %bb.0:
; FMA-NEXT: vbroadcastss {{.*#+}} xmm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; FMA-NEXT: vsubps %xmm2, %xmm3, %xmm3
; FMA-NEXT: vmulps %xmm3, %xmm1, %xmm1
; FMA-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm2 * xmm0) + xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_interp:
; FMA4: # %bb.0:
; FMA4-NEXT: vbroadcastss {{.*#+}} xmm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; FMA4-NEXT: vsubps %xmm2, %xmm3, %xmm3
; FMA4-NEXT: vmulps %xmm3, %xmm1, %xmm1
; FMA4-NEXT: vfmaddps {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_interp:
; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastss {{.*#+}} xmm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; AVX512-NEXT: vsubps %xmm2, %xmm3, %xmm3
; AVX512-NEXT: vmulps %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm2 * xmm0) + xmm1
; AVX512-NEXT: retq
%t1 = fsub contract nsz <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %t
%tx = fmul contract nsz <4 x float> %x, %t
%ty = fmul contract nsz <4 x float> %y, %t1
%r = fadd contract nsz <4 x float> %tx, %ty
ret <4 x float> %r
}
| |
; v4f32 linear interpolation with 'ninf': distributed into two chained
; fmsubs, with no explicit splat constant or subtract.
define <4 x float> @test_v4f32_interp_ninf(<4 x float> %x, <4 x float> %y, <4 x float> %t) {
; FMA-LABEL: test_v4f32_interp_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps {{.*#+}} xmm1 = (xmm2 * xmm1) - xmm1
; FMA-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm2 * xmm0) - xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_interp_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps {{.*#+}} xmm1 = (xmm2 * xmm1) - xmm1
; FMA4-NEXT: vfmsubps {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_interp_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps {{.*#+}} xmm1 = (xmm2 * xmm1) - xmm1
; AVX512-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm2 * xmm0) - xmm1
; AVX512-NEXT: retq
%t1 = fsub contract ninf nsz <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %t
%tx = fmul contract ninf nsz <4 x float> %x, %t
%ty = fmul contract ninf nsz <4 x float> %y, %t1
%r = fadd contract ninf nsz <4 x float> %tx, %ty
ret <4 x float> %r
}
| |
; v8f32 (256-bit) linear interpolation: same structure as test_v4f32_interp
; but on ymm registers; no 'ninf', so (1-t) stays explicit plus one fmadd.
define <8 x float> @test_v8f32_interp(<8 x float> %x, <8 x float> %y, <8 x float> %t) {
; FMA-LABEL: test_v8f32_interp:
; FMA: # %bb.0:
; FMA-NEXT: vbroadcastss {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; FMA-NEXT: vsubps %ymm2, %ymm3, %ymm3
; FMA-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA-NEXT: vfmadd213ps {{.*#+}} ymm0 = (ymm2 * ymm0) + ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v8f32_interp:
; FMA4: # %bb.0:
; FMA4-NEXT: vbroadcastss {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; FMA4-NEXT: vsubps %ymm2, %ymm3, %ymm3
; FMA4-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA4-NEXT: vfmaddps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v8f32_interp:
; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastss {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; AVX512-NEXT: vsubps %ymm2, %ymm3, %ymm3
; AVX512-NEXT: vmulps %ymm3, %ymm1, %ymm1
; AVX512-NEXT: vfmadd213ps {{.*#+}} ymm0 = (ymm2 * ymm0) + ymm1
; AVX512-NEXT: retq
%t1 = fsub contract nsz <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %t
%tx = fmul contract nsz <8 x float> %x, %t
%ty = fmul contract nsz <8 x float> %y, %t1
%r = fadd contract nsz <8 x float> %tx, %ty
ret <8 x float> %r
}
| |
; v8f32 linear interpolation with 'ninf': distributed into two chained
; fmsubs on ymm registers.
define <8 x float> @test_v8f32_interp_ninf(<8 x float> %x, <8 x float> %y, <8 x float> %t) {
; FMA-LABEL: test_v8f32_interp_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps {{.*#+}} ymm1 = (ymm2 * ymm1) - ymm1
; FMA-NEXT: vfmsub213ps {{.*#+}} ymm0 = (ymm2 * ymm0) - ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v8f32_interp_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps {{.*#+}} ymm1 = (ymm2 * ymm1) - ymm1
; FMA4-NEXT: vfmsubps {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v8f32_interp_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps {{.*#+}} ymm1 = (ymm2 * ymm1) - ymm1
; AVX512-NEXT: vfmsub213ps {{.*#+}} ymm0 = (ymm2 * ymm0) - ymm1
; AVX512-NEXT: retq
%t1 = fsub contract ninf nsz <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %t
%tx = fmul contract ninf nsz <8 x float> %x, %t
%ty = fmul contract ninf nsz <8 x float> %y, %t1
%r = fadd contract ninf nsz <8 x float> %tx, %ty
ret <8 x float> %r
}
| |
; Scalar f64 linear interpolation: r = x*t + y*(1-t). No 'ninf', so the
; (1-t) subtract stays explicit; only the final mul+add becomes one fmadd.
define double @test_f64_interp(double %x, double %y, double %t) {
; FMA-LABEL: test_f64_interp:
; FMA: # %bb.0:
; FMA-NEXT: vmovsd {{.*#+}} xmm3 = [1.0E+0,0.0E+0]
; FMA-NEXT: vsubsd %xmm2, %xmm3, %xmm3
; FMA-NEXT: vmulsd %xmm3, %xmm1, %xmm1
; FMA-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm2 * xmm0) + xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f64_interp:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovsd {{.*#+}} xmm3 = [1.0E+0,0.0E+0]
; FMA4-NEXT: vsubsd %xmm2, %xmm3, %xmm3
; FMA4-NEXT: vmulsd %xmm3, %xmm1, %xmm1
; FMA4-NEXT: vfmaddsd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f64_interp:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovsd {{.*#+}} xmm3 = [1.0E+0,0.0E+0]
; AVX512-NEXT: vsubsd %xmm2, %xmm3, %xmm3
; AVX512-NEXT: vmulsd %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm2 * xmm0) + xmm1
; AVX512-NEXT: retq
%t1 = fsub contract nsz double 1.0, %t
%tx = fmul contract nsz double %x, %t
%ty = fmul contract nsz double %y, %t1
%r = fadd contract nsz double %tx, %ty
ret double %r
}
| |
; Scalar f64 linear interpolation with 'ninf': distributed into two chained
; scalar fmsubs, with no explicit 1.0 constant or subtract.
define double @test_f64_interp_ninf(double %x, double %y, double %t) {
; FMA-LABEL: test_f64_interp_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213sd {{.*#+}} xmm1 = (xmm2 * xmm1) - xmm1
; FMA-NEXT: vfmsub213sd {{.*#+}} xmm0 = (xmm2 * xmm0) - xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f64_interp_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubsd {{.*#+}} xmm1 = (xmm2 * xmm1) - xmm1
; FMA4-NEXT: vfmsubsd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f64_interp_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213sd {{.*#+}} xmm1 = (xmm2 * xmm1) - xmm1
; AVX512-NEXT: vfmsub213sd {{.*#+}} xmm0 = (xmm2 * xmm0) - xmm1
; AVX512-NEXT: retq
%t1 = fsub contract ninf nsz double 1.0, %t
%tx = fmul contract ninf nsz double %x, %t
%ty = fmul contract ninf nsz double %y, %t1
%r = fadd contract ninf nsz double %tx, %ty
ret double %r
}
| |
; v2f64 linear interpolation: the 1.0 splat comes from vmovddup; no 'ninf',
; so the subtract stays explicit plus one fmadd for the final mul+add.
define <2 x double> @test_v2f64_interp(<2 x double> %x, <2 x double> %y, <2 x double> %t) {
; FMA-LABEL: test_v2f64_interp:
; FMA: # %bb.0:
; FMA-NEXT: vmovddup {{.*#+}} xmm3 = [1.0E+0,1.0E+0]
; FMA-NEXT: # xmm3 = mem[0,0]
; FMA-NEXT: vsubpd %xmm2, %xmm3, %xmm3
; FMA-NEXT: vmulpd %xmm3, %xmm1, %xmm1
; FMA-NEXT: vfmadd213pd {{.*#+}} xmm0 = (xmm2 * xmm0) + xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v2f64_interp:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovddup {{.*#+}} xmm3 = [1.0E+0,1.0E+0]
; FMA4-NEXT: # xmm3 = mem[0,0]
; FMA4-NEXT: vsubpd %xmm2, %xmm3, %xmm3
; FMA4-NEXT: vmulpd %xmm3, %xmm1, %xmm1
; FMA4-NEXT: vfmaddpd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v2f64_interp:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovddup {{.*#+}} xmm3 = [1.0E+0,1.0E+0]
; AVX512-NEXT: # xmm3 = mem[0,0]
; AVX512-NEXT: vsubpd %xmm2, %xmm3, %xmm3
; AVX512-NEXT: vmulpd %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vfmadd213pd {{.*#+}} xmm0 = (xmm2 * xmm0) + xmm1
; AVX512-NEXT: retq
%t1 = fsub contract nsz <2 x double> <double 1.0, double 1.0>, %t
%tx = fmul contract nsz <2 x double> %x, %t
%ty = fmul contract nsz <2 x double> %y, %t1
%r = fadd contract nsz <2 x double> %tx, %ty
ret <2 x double> %r
}
| |
; v2f64 linear interpolation with 'ninf': distributed into two chained
; fmsubs, with no constant load.
define <2 x double> @test_v2f64_interp_ninf(<2 x double> %x, <2 x double> %y, <2 x double> %t) {
; FMA-LABEL: test_v2f64_interp_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213pd {{.*#+}} xmm1 = (xmm2 * xmm1) - xmm1
; FMA-NEXT: vfmsub213pd {{.*#+}} xmm0 = (xmm2 * xmm0) - xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v2f64_interp_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubpd {{.*#+}} xmm1 = (xmm2 * xmm1) - xmm1
; FMA4-NEXT: vfmsubpd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v2f64_interp_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213pd {{.*#+}} xmm1 = (xmm2 * xmm1) - xmm1
; AVX512-NEXT: vfmsub213pd {{.*#+}} xmm0 = (xmm2 * xmm0) - xmm1
; AVX512-NEXT: retq
%t1 = fsub contract ninf nsz <2 x double> <double 1.0, double 1.0>, %t
%tx = fmul contract ninf nsz <2 x double> %x, %t
%ty = fmul contract ninf nsz <2 x double> %y, %t1
%r = fadd contract ninf nsz <2 x double> %tx, %ty
ret <2 x double> %r
}
| |
; v4f64 (256-bit) linear interpolation: no 'ninf', so the (1-t) subtract
; stays explicit plus one fmadd for the final mul+add, on ymm registers.
define <4 x double> @test_v4f64_interp(<4 x double> %x, <4 x double> %y, <4 x double> %t) {
; FMA-LABEL: test_v4f64_interp:
; FMA: # %bb.0:
; FMA-NEXT: vbroadcastsd {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; FMA-NEXT: vsubpd %ymm2, %ymm3, %ymm3
; FMA-NEXT: vmulpd %ymm3, %ymm1, %ymm1
; FMA-NEXT: vfmadd213pd {{.*#+}} ymm0 = (ymm2 * ymm0) + ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f64_interp:
; FMA4: # %bb.0:
; FMA4-NEXT: vbroadcastsd {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; FMA4-NEXT: vsubpd %ymm2, %ymm3, %ymm3
; FMA4-NEXT: vmulpd %ymm3, %ymm1, %ymm1
; FMA4-NEXT: vfmaddpd {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f64_interp:
; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastsd {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; AVX512-NEXT: vsubpd %ymm2, %ymm3, %ymm3
; AVX512-NEXT: vmulpd %ymm3, %ymm1, %ymm1
; AVX512-NEXT: vfmadd213pd {{.*#+}} ymm0 = (ymm2 * ymm0) + ymm1
; AVX512-NEXT: retq
%t1 = fsub contract nsz <4 x double> <double 1.0, double 1.0, double 1.0, double 1.0>, %t
%tx = fmul contract nsz <4 x double> %x, %t
%ty = fmul contract nsz <4 x double> %y, %t1
%r = fadd contract nsz <4 x double> %tx, %ty
ret <4 x double> %r
}
| |
; v4f64 linear interpolation with 'ninf': distributed into two chained
; fmsubs on ymm registers.
define <4 x double> @test_v4f64_interp_ninf(<4 x double> %x, <4 x double> %y, <4 x double> %t) {
; FMA-LABEL: test_v4f64_interp_ninf:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213pd {{.*#+}} ymm1 = (ymm2 * ymm1) - ymm1
; FMA-NEXT: vfmsub213pd {{.*#+}} ymm0 = (ymm2 * ymm0) - ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f64_interp_ninf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubpd {{.*#+}} ymm1 = (ymm2 * ymm1) - ymm1
; FMA4-NEXT: vfmsubpd {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f64_interp_ninf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213pd {{.*#+}} ymm1 = (ymm2 * ymm1) - ymm1
; AVX512-NEXT: vfmsub213pd {{.*#+}} ymm0 = (ymm2 * ymm0) - ymm1
; AVX512-NEXT: retq
%t1 = fsub contract ninf nsz <4 x double> <double 1.0, double 1.0, double 1.0, double 1.0>, %t
%tx = fmul contract ninf nsz <4 x double> %x, %t
%ty = fmul contract ninf nsz <4 x double> %y, %t1
%r = fadd contract ninf nsz <4 x double> %tx, %ty
ret <4 x double> %r
}
| |
| ; |
| ; Pattern: (fneg (fma x, y, z)) -> (fma x, -y, -z) |
| ; |
| |
; -((a0*a1) + a2) folds to a single fnmsub: -(a0*a1) - a2.
define <4 x float> @test_v4f32_fneg_fmadd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; FMA-LABEL: test_v4f32_fneg_fmadd:
; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_fneg_fmadd:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubps {{.*#+}} xmm0 = -(xmm0 * xmm1) - xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_fneg_fmadd:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; AVX512-NEXT: retq
  %mul = fmul contract nsz <4 x float> %a0, %a1
  %add = fadd contract nsz <4 x float> %mul, %a2
  %neg = fsub contract nsz <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %add
  ret <4 x float> %neg
}
| |
; -((a0*a1) - a2) folds to a single fnmadd: -(a0*a1) + a2.
define <4 x double> @test_v4f64_fneg_fmsub(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
; FMA-LABEL: test_v4f64_fneg_fmsub:
; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f64_fneg_fmsub:
; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddpd {{.*#+}} ymm0 = -(ymm0 * ymm1) + ymm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f64_fneg_fmsub:
; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2
; AVX512-NEXT: retq
  %mul = fmul contract nsz <4 x double> %a0, %a1
  %sub = fsub contract nsz <4 x double> %mul, %a2
  %neg = fsub contract nsz <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %sub
  ret <4 x double> %neg
}
| |
; -(-(a0*a1) + a2): the two negations cancel on the product, folding to a
; single fmsub: (a0*a1) - a2.
define <4 x float> @test_v4f32_fneg_fnmadd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; FMA-LABEL: test_v4f32_fneg_fnmadd:
; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_fneg_fnmadd:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_fneg_fnmadd:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
; AVX512-NEXT: retq
  %mul = fmul contract nsz <4 x float> %a0, %a1
  %neg0 = fsub contract nsz <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %mul
  %add = fadd contract nsz <4 x float> %neg0, %a2
  %neg1 = fsub contract nsz <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %add
  ret <4 x float> %neg1
}
| |
; -(-(a0*a1) - a2): both negations cancel, folding to a single fmadd:
; (a0*a1) + a2.
define <4 x double> @test_v4f64_fneg_fnmsub(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
; FMA-LABEL: test_v4f64_fneg_fnmsub:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f64_fneg_fnmsub:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddpd {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f64_fneg_fnmsub:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; AVX512-NEXT: retq
  %mul = fmul contract nsz <4 x double> %a0, %a1
  %neg0 = fsub contract nsz <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %mul
  %sub = fsub contract nsz <4 x double> %neg0, %a2
  %neg1 = fsub contract nsz <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %sub
  ret <4 x double> %neg1
}
| |
| ; |
| ; Pattern: (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2) |
| ; |
| |
; x*c1 + x*c2 with reassoc folds to a single multiply by the
; constant-folded vector c1+c2 (here <5,5,5,5>, hence the AVX512 broadcast).
define <4 x float> @test_v4f32_fma_x_c1_fmul_x_c2(<4 x float> %x) {
; FMA-LABEL: test_v4f32_fma_x_c1_fmul_x_c2:
; FMA: # %bb.0:
; FMA-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_fma_x_c1_fmul_x_c2:
; FMA4: # %bb.0:
; FMA4-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_fma_x_c1_fmul_x_c2:
; AVX512: # %bb.0:
; AVX512-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: retq
  %m0 = fmul contract reassoc <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
  %m1 = fmul contract reassoc <4 x float> %x, <float 4.0, float 3.0, float 2.0, float 1.0>
  %a = fadd contract reassoc <4 x float> %m0, %m1
  ret <4 x float> %a
}
| |
| ; |
| ; Pattern: (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y) |
| ; |
| |
; (x*c1)*c2 + y with reassoc folds to one FMA whose multiplier c1*c2 is
; constant-folded into a memory operand.
define <4 x float> @test_v4f32_fma_fmul_x_c1_c2_y(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_fma_fmul_x_c1_c2_y:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_fma_fmul_x_c1_c2_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_fma_fmul_x_c1_c2_y:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1
; AVX512-NEXT: retq
  %m0 = fmul contract reassoc <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
  %m1 = fmul contract reassoc <4 x float> %m0, <float 4.0, float 3.0, float 2.0, float 1.0>
  %a = fadd contract reassoc <4 x float> %m1, %y
  ret <4 x float> %a
}
| |
| ; Pattern: (fneg (fmul x, y)) -> (fnmsub x, y, 0) |
| |
; Scalar -(x*y) with nsz on the fmul: folds to fnmsub against a zeroed
; register (-(x*y) - 0).
define double @test_f64_fneg_fmul(double %x, double %y) {
; FMA-LABEL: test_f64_fneg_fmul:
; FMA: # %bb.0:
; FMA-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; FMA-NEXT: vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f64_fneg_fmul:
; FMA4: # %bb.0:
; FMA4-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; FMA4-NEXT: vfnmsubsd {{.*#+}} xmm0 = -(xmm0 * xmm1) - xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f64_fneg_fmul:
; AVX512: # %bb.0:
; AVX512-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; AVX512-NEXT: retq
  %m = fmul contract nsz double %x, %y
  %n = fsub contract double -0.0, %m
  ret double %n
}
| |
; Vector f32 -(x*y) with nsz: folds to fnmsub against a zeroed register.
define <4 x float> @test_v4f32_fneg_fmul(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_fneg_fmul:
; FMA: # %bb.0:
; FMA-NEXT: vxorps %xmm2, %xmm2, %xmm2
; FMA-NEXT: vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_fneg_fmul:
; FMA4: # %bb.0:
; FMA4-NEXT: vxorps %xmm2, %xmm2, %xmm2
; FMA4-NEXT: vfnmsubps {{.*#+}} xmm0 = -(xmm0 * xmm1) - xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_fneg_fmul:
; AVX512: # %bb.0:
; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; AVX512-NEXT: retq
  %m = fmul contract nsz <4 x float> %x, %y
  %n = fsub contract <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %m
  ret <4 x float> %n
}
| |
; Vector f64 -(x*y) with nsz: folds to fnmsub against a zeroed register.
define <4 x double> @test_v4f64_fneg_fmul(<4 x double> %x, <4 x double> %y) {
; FMA-LABEL: test_v4f64_fneg_fmul:
; FMA: # %bb.0:
; FMA-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; FMA-NEXT: vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f64_fneg_fmul:
; FMA4: # %bb.0:
; FMA4-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; FMA4-NEXT: vfnmsubpd {{.*#+}} ymm0 = -(ymm0 * ymm1) - ymm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f64_fneg_fmul:
; AVX512: # %bb.0:
; AVX512-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2
; AVX512-NEXT: retq
  %m = fmul contract nsz <4 x double> %x, %y
  %n = fsub contract <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %m
  ret <4 x double> %n
}
| |
; Negative test: without nsz on the fmul, no FMA is formed — the backend
; keeps an explicit multiply followed by a sign-bit xor for the negation.
define <4 x double> @test_v4f64_fneg_fmul_no_nsz(<4 x double> %x, <4 x double> %y) {
; FMA-LABEL: test_v4f64_fneg_fmul_no_nsz:
; FMA: # %bb.0:
; FMA-NEXT: vmulpd %ymm1, %ymm0, %ymm0
; FMA-NEXT: vxorpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f64_fneg_fmul_no_nsz:
; FMA4: # %bb.0:
; FMA4-NEXT: vmulpd %ymm1, %ymm0, %ymm0
; FMA4-NEXT: vxorpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f64_fneg_fmul_no_nsz:
; AVX512: # %bb.0:
; AVX512-NEXT: vmulpd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vxorpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
; AVX512-NEXT: retq
  %m = fmul contract <4 x double> %x, %y
  %n = fsub contract <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %m
  ret <4 x double> %n
}
| |
| ; ((a*b) + (c*d)) + n1 --> (a*b) + ((c*d) + n1) |
| |
; With full fast FMF, ((a*b)+(c*d))+n1 reassociates to (a*b)+((c*d)+n1),
; yielding two chained FMAs and no standalone add.
define double @fadd_fma_fmul_1(double %a, double %b, double %c, double %d, double %n1) nounwind {
; FMA-LABEL: fadd_fma_fmul_1:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd213sd {{.*#+}} xmm2 = (xmm3 * xmm2) + xmm4
; FMA-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: fadd_fma_fmul_1:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddsd {{.*#+}} xmm2 = (xmm2 * xmm3) + xmm4
; FMA4-NEXT: vfmaddsd {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: fadd_fma_fmul_1:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213sd {{.*#+}} xmm2 = (xmm3 * xmm2) + xmm4
; AVX512-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; AVX512-NEXT: retq
  %m1 = fmul contract fast double %a, %b
  %m2 = fmul contract fast double %c, %d
  %a1 = fadd contract fast double %m1, %m2
  %a2 = fadd contract fast double %a1, %n1
  ret double %a2
}
| |
| ; Minimum FMF - the 1st fadd is contracted because that combines |
| ; fmul+fadd as specified by the order of operations; the 2nd fadd |
| ; requires reassociation to fuse with c*d. |
| |
; Expected result: both fadds still fuse into FMAs even though only the
; final fadd carries reassoc.
define float @fadd_fma_fmul_fmf(float %a, float %b, float %c, float %d, float %n0) nounwind {
; FMA-LABEL: fadd_fma_fmul_fmf:
; FMA: # %bb.0:
; FMA-NEXT: vfmadd213ss {{.*#+}} xmm2 = (xmm3 * xmm2) + xmm4
; FMA-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; FMA-NEXT: retq
;
; FMA4-LABEL: fadd_fma_fmul_fmf:
; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddss {{.*#+}} xmm2 = (xmm2 * xmm3) + xmm4
; FMA4-NEXT: vfmaddss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm2
; FMA4-NEXT: retq
;
; AVX512-LABEL: fadd_fma_fmul_fmf:
; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213ss {{.*#+}} xmm2 = (xmm3 * xmm2) + xmm4
; AVX512-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; AVX512-NEXT: retq
  %m1 = fmul contract float %a, %b
  %m2 = fmul contract float %c, %d
  %a1 = fadd contract float %m1, %m2
  %a2 = fadd contract reassoc float %n0, %a1
  ret float %a2
}
| |
| ; Not minimum FMF. |
| |
; With contract only (no reassoc on the final fadd), just one FMA forms;
; c*d stays a plain multiply and n0 is added with a separate vaddss.
define float @fadd_fma_fmul_2(float %a, float %b, float %c, float %d, float %n0) nounwind {
; FMA-LABEL: fadd_fma_fmul_2:
; FMA: # %bb.0:
; FMA-NEXT: vmulss %xmm3, %xmm2, %xmm2
; FMA-NEXT: vfmadd231ss {{.*#+}} xmm2 = (xmm1 * xmm0) + xmm2
; FMA-NEXT: vaddss %xmm2, %xmm4, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: fadd_fma_fmul_2:
; FMA4: # %bb.0:
; FMA4-NEXT: vmulss %xmm3, %xmm2, %xmm2
; FMA4-NEXT: vfmaddss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm2
; FMA4-NEXT: vaddss %xmm0, %xmm4, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: fadd_fma_fmul_2:
; AVX512: # %bb.0:
; AVX512-NEXT: vmulss %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vfmadd231ss {{.*#+}} xmm2 = (xmm1 * xmm0) + xmm2
; AVX512-NEXT: vaddss %xmm2, %xmm4, %xmm0
; AVX512-NEXT: retq
  %m1 = fmul contract float %a, %b
  %m2 = fmul contract float %c, %d
  %a1 = fadd contract float %m1, %m2
  %a2 = fadd contract float %n0, %a1
  ret float %a2
}
| |
| ; The final fadd can be folded with either 1 of the leading fmuls. |
| |
; Sum of four products with fast FMF: one plain multiply seeds the
; accumulator and the other three products fuse into it as chained FMAs.
define <2 x double> @fadd_fma_fmul_3(<2 x double> %x1, <2 x double> %x2, <2 x double> %x3, <2 x double> %x4, <2 x double> %x5, <2 x double> %x6, <2 x double> %x7, <2 x double> %x8) nounwind {
; FMA-LABEL: fadd_fma_fmul_3:
; FMA: # %bb.0:
; FMA-NEXT: vmulpd %xmm3, %xmm2, %xmm2
; FMA-NEXT: vfmadd231pd {{.*#+}} xmm2 = (xmm1 * xmm0) + xmm2
; FMA-NEXT: vfmadd231pd {{.*#+}} xmm2 = (xmm7 * xmm6) + xmm2
; FMA-NEXT: vfmadd231pd {{.*#+}} xmm2 = (xmm5 * xmm4) + xmm2
; FMA-NEXT: vmovapd %xmm2, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: fadd_fma_fmul_3:
; FMA4: # %bb.0:
; FMA4-NEXT: vmulpd %xmm3, %xmm2, %xmm2
; FMA4-NEXT: vfmaddpd {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm2
; FMA4-NEXT: vfmaddpd {{.*#+}} xmm0 = (xmm6 * xmm7) + xmm0
; FMA4-NEXT: vfmaddpd {{.*#+}} xmm0 = (xmm4 * xmm5) + xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: fadd_fma_fmul_3:
; AVX512: # %bb.0:
; AVX512-NEXT: vmulpd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vfmadd231pd {{.*#+}} xmm2 = (xmm1 * xmm0) + xmm2
; AVX512-NEXT: vfmadd231pd {{.*#+}} xmm2 = (xmm7 * xmm6) + xmm2
; AVX512-NEXT: vfmadd231pd {{.*#+}} xmm2 = (xmm5 * xmm4) + xmm2
; AVX512-NEXT: vmovapd %xmm2, %xmm0
; AVX512-NEXT: retq
  %m1 = fmul contract fast <2 x double> %x1, %x2
  %m2 = fmul contract fast <2 x double> %x3, %x4
  %m3 = fmul contract fast <2 x double> %x5, %x6
  %m4 = fmul contract fast <2 x double> %x7, %x8
  %a1 = fadd contract fast <2 x double> %m1, %m2
  %a2 = fadd contract fast <2 x double> %m3, %m4
  %a3 = fadd contract fast <2 x double> %a1, %a2
  ret <2 x double> %a3
}
| |
| ; negative test |
| |
; Negative test: m1 = a*b has an extra use (stored to %p), so it must stay
; a plain multiply; only c*d fuses into an FMA.
define float @fadd_fma_fmul_extra_use_1(float %a, float %b, float %c, float %d, float %n0, ptr %p) nounwind {
; FMA-LABEL: fadd_fma_fmul_extra_use_1:
; FMA: # %bb.0:
; FMA-NEXT: vmulss %xmm1, %xmm0, %xmm0
; FMA-NEXT: vmovss %xmm0, (%rdi)
; FMA-NEXT: vfmadd213ss {{.*#+}} xmm2 = (xmm3 * xmm2) + xmm0
; FMA-NEXT: vaddss %xmm2, %xmm4, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: fadd_fma_fmul_extra_use_1:
; FMA4: # %bb.0:
; FMA4-NEXT: vmulss %xmm1, %xmm0, %xmm0
; FMA4-NEXT: vmovss %xmm0, (%rdi)
; FMA4-NEXT: vfmaddss {{.*#+}} xmm0 = (xmm2 * xmm3) + xmm0
; FMA4-NEXT: vaddss %xmm0, %xmm4, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: fadd_fma_fmul_extra_use_1:
; AVX512: # %bb.0:
; AVX512-NEXT: vmulss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovss %xmm0, (%rdi)
; AVX512-NEXT: vfmadd213ss {{.*#+}} xmm2 = (xmm3 * xmm2) + xmm0
; AVX512-NEXT: vaddss %xmm2, %xmm4, %xmm0
; AVX512-NEXT: retq
  %m1 = fmul contract fast float %a, %b
  store float %m1, ptr %p
  %m2 = fmul contract fast float %c, %d
  %a1 = fadd contract fast float %m1, %m2
  %a2 = fadd contract fast float %n0, %a1
  ret float %a2
}
| |
| ; negative test |
| |
; Negative test: m2 = c*d has an extra use (stored to %p), so it must stay
; a plain multiply; only a*b fuses into an FMA.
define float @fadd_fma_fmul_extra_use_2(float %a, float %b, float %c, float %d, float %n0, ptr %p) nounwind {
; FMA-LABEL: fadd_fma_fmul_extra_use_2:
; FMA: # %bb.0:
; FMA-NEXT: vmulss %xmm3, %xmm2, %xmm2
; FMA-NEXT: vmovss %xmm2, (%rdi)
; FMA-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; FMA-NEXT: vaddss %xmm0, %xmm4, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: fadd_fma_fmul_extra_use_2:
; FMA4: # %bb.0:
; FMA4-NEXT: vmulss %xmm3, %xmm2, %xmm2
; FMA4-NEXT: vmovss %xmm2, (%rdi)
; FMA4-NEXT: vfmaddss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm2
; FMA4-NEXT: vaddss %xmm0, %xmm4, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: fadd_fma_fmul_extra_use_2:
; AVX512: # %bb.0:
; AVX512-NEXT: vmulss %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vmovss %xmm2, (%rdi)
; AVX512-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; AVX512-NEXT: vaddss %xmm0, %xmm4, %xmm0
; AVX512-NEXT: retq
  %m1 = fmul contract fast float %a, %b
  %m2 = fmul contract fast float %c, %d
  store float %m2, ptr %p
  %a1 = fadd contract fast float %m1, %m2
  %a2 = fadd contract fast float %n0, %a1
  ret float %a2
}
| |
| ; negative test |
| |
; Negative test: the intermediate sum a1 has an extra use (stored to %p),
; so the final fadd cannot be reassociated into the FMA chain — it remains
; a separate vaddss after the store.
define float @fadd_fma_fmul_extra_use_3(float %a, float %b, float %c, float %d, float %n0, ptr %p) nounwind {
; FMA-LABEL: fadd_fma_fmul_extra_use_3:
; FMA: # %bb.0:
; FMA-NEXT: vmulss %xmm3, %xmm2, %xmm2
; FMA-NEXT: vfmadd231ss {{.*#+}} xmm2 = (xmm1 * xmm0) + xmm2
; FMA-NEXT: vmovss %xmm2, (%rdi)
; FMA-NEXT: vaddss %xmm2, %xmm4, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: fadd_fma_fmul_extra_use_3:
; FMA4: # %bb.0:
; FMA4-NEXT: vmulss %xmm3, %xmm2, %xmm2
; FMA4-NEXT: vfmaddss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm2
; FMA4-NEXT: vmovss %xmm0, (%rdi)
; FMA4-NEXT: vaddss %xmm0, %xmm4, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: fadd_fma_fmul_extra_use_3:
; AVX512: # %bb.0:
; AVX512-NEXT: vmulss %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vfmadd231ss {{.*#+}} xmm2 = (xmm1 * xmm0) + xmm2
; AVX512-NEXT: vmovss %xmm2, (%rdi)
; AVX512-NEXT: vaddss %xmm2, %xmm4, %xmm0
; AVX512-NEXT: retq
  %m1 = fmul contract fast float %a, %b
  %m2 = fmul contract fast float %c, %d
  %a1 = fadd contract fast float %m1, %m2
  store float %a1, ptr %p
  %a2 = fadd contract fast float %n0, %a1
  ret float %a2
}
| ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: |
| ; AVX512-INFS: {{.*}} |
| ; FMA-INFS: {{.*}} |
| ; FMA4-INFS: {{.*}} |