| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16 | FileCheck %s |
| |
| declare half @llvm.minimum.f16(half, half) |
| declare half @llvm.maximum.f16(half, half) |
| declare <8 x half> @llvm.minimum.v8f16(<8 x half>, <8 x half>) |
| declare <8 x half> @llvm.maximum.v8f16(<8 x half>, <8 x half>) |
| |
| ; Generic scalar f16 minimum lowering: the sign bit of %x (vmovw + testw/sets) |
| ; selects a mask blend that orders the operands so -0.0 is preferred over +0.0, |
| ; then vminsh computes the min and a vcmpunordsh blend re-injects the NaN input. |
| define half @test_fminimum(half %x, half %y) { |
| ; CHECK-LABEL: test_fminimum: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vmovw %xmm0, %eax |
| ; CHECK-NEXT: testw %ax, %ax |
| ; CHECK-NEXT: sets %al |
| ; CHECK-NEXT: kmovd %eax, %k1 |
| ; CHECK-NEXT: vmovaps %xmm1, %xmm2 |
| ; CHECK-NEXT: vmovsh %xmm0, %xmm0, %xmm2 {%k1} |
| ; CHECK-NEXT: vmovsh %xmm1, %xmm0, %xmm0 {%k1} |
| ; CHECK-NEXT: vminsh %xmm2, %xmm0, %xmm1 |
| ; CHECK-NEXT: vcmpunordsh %xmm0, %xmm0, %k1 |
| ; CHECK-NEXT: vmovsh %xmm0, %xmm0, %xmm1 {%k1} |
| ; CHECK-NEXT: vmovaps %xmm1, %xmm0 |
| ; CHECK-NEXT: retq |
| %z = call half @llvm.minimum.f16(half %x, half %y) |
| ret half %z |
| } |
| |
| ; With the no-NaNs and no-signed-zeros function attributes the v8f16 minimum |
| ; is scalarized: each lane pair is extracted via shuffles/shifts, combined |
| ; with a plain vminsh (no NaN or zero-sign fixups), and re-packed with |
| ; vpunpcklwd/vpunpckldq/vpunpcklqdq. |
| define <8 x half> @test_fminimum_scalarize(<8 x half> %x, <8 x half> %y) "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" { |
| ; CHECK-LABEL: test_fminimum_scalarize: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; CHECK-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; CHECK-NEXT: vminsh %xmm2, %xmm3, %xmm2 |
| ; CHECK-NEXT: vshufps {{.*#+}} xmm3 = xmm1[3,3,3,3] |
| ; CHECK-NEXT: vshufps {{.*#+}} xmm4 = xmm0[3,3,3,3] |
| ; CHECK-NEXT: vminsh %xmm3, %xmm4, %xmm3 |
| ; CHECK-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] |
| ; CHECK-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; CHECK-NEXT: vpsrldq {{.*#+}} xmm4 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; CHECK-NEXT: vminsh %xmm3, %xmm4, %xmm3 |
| ; CHECK-NEXT: vshufpd {{.*#+}} xmm4 = xmm1[1,0] |
| ; CHECK-NEXT: vshufpd {{.*#+}} xmm5 = xmm0[1,0] |
| ; CHECK-NEXT: vminsh %xmm4, %xmm5, %xmm4 |
| ; CHECK-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] |
| ; CHECK-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] |
| ; CHECK-NEXT: vpsrlq $48, %xmm1, %xmm3 |
| ; CHECK-NEXT: vpsrlq $48, %xmm0, %xmm4 |
| ; CHECK-NEXT: vminsh %xmm3, %xmm4, %xmm3 |
| ; CHECK-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3] |
| ; CHECK-NEXT: vmovshdup {{.*#+}} xmm5 = xmm0[1,1,3,3] |
| ; CHECK-NEXT: vminsh %xmm4, %xmm5, %xmm4 |
| ; CHECK-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] |
| ; CHECK-NEXT: vminsh %xmm1, %xmm0, %xmm4 |
| ; CHECK-NEXT: vpsrld $16, %xmm1, %xmm1 |
| ; CHECK-NEXT: vpsrld $16, %xmm0, %xmm0 |
| ; CHECK-NEXT: vminsh %xmm1, %xmm0, %xmm0 |
| ; CHECK-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3] |
| ; CHECK-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] |
| ; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] |
| ; CHECK-NEXT: retq |
| %r = call <8 x half> @llvm.minimum.v8f16(<8 x half> %x, <8 x half> %y) |
| ret <8 x half> %r |
| } |
| |
| ; no-nans-fp-math drops the trailing vcmpunordsh NaN blend; only the |
| ; zero-sign ordering remains, driven by vfpclasssh $5 on %y (imm 5 is |
| ; presumably the QNaN|negative-zero class mask — confirm against the |
| ; vfpclass immediate encoding). |
| define half @test_fminimum_nnan(half %x, half %y) "no-nans-fp-math"="true" { |
| ; CHECK-LABEL: test_fminimum_nnan: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vfpclasssh $5, %xmm1, %k1 |
| ; CHECK-NEXT: vmovaps %xmm0, %xmm2 |
| ; CHECK-NEXT: vmovsh %xmm1, %xmm0, %xmm2 {%k1} |
| ; CHECK-NEXT: vmovsh %xmm0, %xmm0, %xmm1 {%k1} |
| ; CHECK-NEXT: vminsh %xmm2, %xmm1, %xmm0 |
| ; CHECK-NEXT: retq |
| %1 = tail call half @llvm.minimum.f16(half %x, half %y) |
| ret half %1 |
| } |
| |
| ; Constant -0.0 as the first operand: the sign-ordering blend folds away, |
| ; leaving vminsh against a constant-pool load plus the NaN blend on %y. |
| ; (%x is unused by the call.) |
| define half @test_fminimum_zero(half %x, half %y) { |
| ; CHECK-LABEL: test_fminimum_zero: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vcmpunordsh %xmm1, %xmm1, %k1 |
| ; CHECK-NEXT: vminsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0 |
| ; CHECK-NEXT: vmovsh %xmm1, %xmm0, %xmm0 {%k1} |
| ; CHECK-NEXT: retq |
| %1 = tail call half @llvm.minimum.f16(half -0.0, half %y) |
| ret half %1 |
| } |
| |
| ; The nsz flag on the call skips the signed-zero ordering entirely; only |
| ; vminsh plus the NaN-propagation blend (vcmpunordsh on %x) remain. |
| define half @test_fminimum_nsz(half %x, half %y) { |
| ; CHECK-LABEL: test_fminimum_nsz: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vminsh %xmm1, %xmm0, %xmm1 |
| ; CHECK-NEXT: vcmpunordsh %xmm0, %xmm0, %k1 |
| ; CHECK-NEXT: vmovsh %xmm0, %xmm0, %xmm1 {%k1} |
| ; CHECK-NEXT: vmovaps %xmm1, %xmm0 |
| ; CHECK-NEXT: retq |
| %1 = tail call nsz half @llvm.minimum.f16(half %x, half %y) |
| ret half %1 |
| } |
| |
| ; The second minimum operand is an nnan fdiv result, so only %x can be NaN: |
| ; no separate vcmpunordsh is emitted, and a single vfpclasssh $5 on %x |
| ; drives the operand blend before vminsh. |
| define half @test_fminimum_combine_cmps(half %x, half %y) { |
| ; CHECK-LABEL: test_fminimum_combine_cmps: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vdivsh %xmm0, %xmm1, %xmm1 |
| ; CHECK-NEXT: vfpclasssh $5, %xmm0, %k1 |
| ; CHECK-NEXT: vmovaps %xmm1, %xmm2 |
| ; CHECK-NEXT: vmovsh %xmm0, %xmm0, %xmm2 {%k1} |
| ; CHECK-NEXT: vmovsh %xmm1, %xmm0, %xmm0 {%k1} |
| ; CHECK-NEXT: vminsh %xmm2, %xmm0, %xmm0 |
| ; CHECK-NEXT: retq |
| %1 = fdiv nnan half %y, %x |
| %2 = tail call half @llvm.minimum.f16(half %x, half %1) |
| ret half %2 |
| } |
| |
| ; Mirror of test_fminimum for llvm.maximum.f16: the sign-bit test on %x |
| ; drives a blend with swapped operand roles (so +0.0 is ordered ahead of |
| ; -0.0), vmaxsh computes the max, and the vcmpunordsh blend restores NaN. |
| define half @test_fmaximum(half %x, half %y) { |
| ; CHECK-LABEL: test_fmaximum: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vmovw %xmm0, %eax |
| ; CHECK-NEXT: testw %ax, %ax |
| ; CHECK-NEXT: sets %al |
| ; CHECK-NEXT: kmovd %eax, %k1 |
| ; CHECK-NEXT: vmovaps %xmm0, %xmm2 |
| ; CHECK-NEXT: vmovsh %xmm1, %xmm0, %xmm2 {%k1} |
| ; CHECK-NEXT: vmovsh %xmm0, %xmm0, %xmm1 {%k1} |
| ; CHECK-NEXT: vmaxsh %xmm2, %xmm1, %xmm0 |
| ; CHECK-NEXT: vcmpunordsh %xmm1, %xmm1, %k1 |
| ; CHECK-NEXT: vmovsh %xmm1, %xmm0, %xmm0 {%k1} |
| ; CHECK-NEXT: retq |
| %r = call half @llvm.maximum.f16(half %x, half %y) |
| ret half %r |
| } |
| |
| ; Max counterpart of test_fminimum_scalarize: under no-NaNs and |
| ; no-signed-zeros the v8f16 maximum scalarizes to eight bare vmaxsh ops on |
| ; shuffle-extracted lanes, re-packed with vpunpck* shuffles. |
| define <8 x half> @test_fmaximum_scalarize(<8 x half> %x, <8 x half> %y) "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" { |
| ; CHECK-LABEL: test_fmaximum_scalarize: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; CHECK-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; CHECK-NEXT: vmaxsh %xmm2, %xmm3, %xmm2 |
| ; CHECK-NEXT: vshufps {{.*#+}} xmm3 = xmm1[3,3,3,3] |
| ; CHECK-NEXT: vshufps {{.*#+}} xmm4 = xmm0[3,3,3,3] |
| ; CHECK-NEXT: vmaxsh %xmm3, %xmm4, %xmm3 |
| ; CHECK-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] |
| ; CHECK-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; CHECK-NEXT: vpsrldq {{.*#+}} xmm4 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero |
| ; CHECK-NEXT: vmaxsh %xmm3, %xmm4, %xmm3 |
| ; CHECK-NEXT: vshufpd {{.*#+}} xmm4 = xmm1[1,0] |
| ; CHECK-NEXT: vshufpd {{.*#+}} xmm5 = xmm0[1,0] |
| ; CHECK-NEXT: vmaxsh %xmm4, %xmm5, %xmm4 |
| ; CHECK-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] |
| ; CHECK-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] |
| ; CHECK-NEXT: vpsrlq $48, %xmm1, %xmm3 |
| ; CHECK-NEXT: vpsrlq $48, %xmm0, %xmm4 |
| ; CHECK-NEXT: vmaxsh %xmm3, %xmm4, %xmm3 |
| ; CHECK-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3] |
| ; CHECK-NEXT: vmovshdup {{.*#+}} xmm5 = xmm0[1,1,3,3] |
| ; CHECK-NEXT: vmaxsh %xmm4, %xmm5, %xmm4 |
| ; CHECK-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] |
| ; CHECK-NEXT: vmaxsh %xmm1, %xmm0, %xmm4 |
| ; CHECK-NEXT: vpsrld $16, %xmm1, %xmm1 |
| ; CHECK-NEXT: vpsrld $16, %xmm0, %xmm0 |
| ; CHECK-NEXT: vmaxsh %xmm1, %xmm0, %xmm0 |
| ; CHECK-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3] |
| ; CHECK-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] |
| ; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] |
| ; CHECK-NEXT: retq |
| %r = call <8 x half> @llvm.maximum.v8f16(<8 x half> %x, <8 x half> %y) |
| ret <8 x half> %r |
| } |
| |
| ; Both maximum operands carry nnan via the fadd/fsub flags (no function |
| ; attribute here), so the NaN blend is dropped; zero-sign ordering uses |
| ; vfpclasssh $3 (presumably the QNaN|positive-zero class mask — confirm |
| ; against the vfpclass immediate encoding). |
| define half @test_fmaximum_nnan(half %x, half %y) { |
| ; CHECK-LABEL: test_fmaximum_nnan: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vaddsh %xmm1, %xmm0, %xmm2 |
| ; CHECK-NEXT: vsubsh %xmm1, %xmm0, %xmm0 |
| ; CHECK-NEXT: vfpclasssh $3, %xmm0, %k1 |
| ; CHECK-NEXT: vmovaps %xmm2, %xmm1 |
| ; CHECK-NEXT: vmovsh %xmm0, %xmm0, %xmm1 {%k1} |
| ; CHECK-NEXT: vmovsh %xmm2, %xmm0, %xmm0 {%k1} |
| ; CHECK-NEXT: vmaxsh %xmm1, %xmm0, %xmm0 |
| ; CHECK-NEXT: retq |
| %1 = fadd nnan half %x, %y |
| %2 = fsub nnan half %x, %y |
| %3 = tail call half @llvm.maximum.f16(half %1, half %2) |
| ret half %3 |
| } |
| |
| ; Constant +0.0 as the first operand: the zero is materialized with vxorps |
| ; (no constant-pool load) and no sign-ordering blend is needed; only the |
| ; NaN blend on %y remains. (%x is unused by the call.) |
| define half @test_fmaximum_zero(half %x, half %y) { |
| ; CHECK-LABEL: test_fmaximum_zero: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0 |
| ; CHECK-NEXT: vmaxsh %xmm0, %xmm1, %xmm0 |
| ; CHECK-NEXT: vcmpunordsh %xmm1, %xmm1, %k1 |
| ; CHECK-NEXT: vmovsh %xmm1, %xmm0, %xmm0 {%k1} |
| ; CHECK-NEXT: retq |
| %1 = tail call half @llvm.maximum.f16(half 0.0, half %y) |
| ret half %1 |
| } |
| |
| ; Attribute form of nsz (function-level "no-signed-zeros-fp-math" rather |
| ; than an nsz call-site flag): the sign-ordering blend is skipped, leaving |
| ; vmaxsh plus the NaN-propagation blend on %x. |
| define half @test_fmaximum_nsz(half %x, half %y) "no-signed-zeros-fp-math"="true" { |
| ; CHECK-LABEL: test_fmaximum_nsz: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vmaxsh %xmm1, %xmm0, %xmm1 |
| ; CHECK-NEXT: vcmpunordsh %xmm0, %xmm0, %k1 |
| ; CHECK-NEXT: vmovsh %xmm0, %xmm0, %xmm1 {%k1} |
| ; CHECK-NEXT: vmovaps %xmm1, %xmm0 |
| ; CHECK-NEXT: retq |
| %1 = tail call half @llvm.maximum.f16(half %x, half %y) |
| ret half %1 |
| } |
| |
| ; Max analog of test_fminimum_combine_cmps: the nnan fdiv result means only |
| ; %x can be NaN, so no vcmpunordsh is emitted and a single vfpclasssh $3 on |
| ; %x drives the operand blend before vmaxsh. |
| define half @test_fmaximum_combine_cmps(half %x, half %y) { |
| ; CHECK-LABEL: test_fmaximum_combine_cmps: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vdivsh %xmm0, %xmm1, %xmm1 |
| ; CHECK-NEXT: vfpclasssh $3, %xmm0, %k1 |
| ; CHECK-NEXT: vmovaps %xmm1, %xmm2 |
| ; CHECK-NEXT: vmovsh %xmm0, %xmm0, %xmm2 {%k1} |
| ; CHECK-NEXT: vmovsh %xmm1, %xmm0, %xmm0 {%k1} |
| ; CHECK-NEXT: vmaxsh %xmm2, %xmm0, %xmm0 |
| ; CHECK-NEXT: retq |
| %1 = fdiv nnan half %y, %x |
| %2 = tail call half @llvm.maximum.f16(half %x, half %1) |
| ret half %2 |
| } |