; blob: 501bdf17613661ed193e30276d7dfd3d0331a913
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=cannonlake | FileCheck %s --check-prefix=CNL
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=cannonlake -mattr=-avx512vl | FileCheck %s --check-prefix=NOVLX
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512ifma,+avx512dq,+avx512vl,+slow-pmullq | FileCheck %s --check-prefix=GENERIC
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512ifma,+avx512dq,-avx512vl,+slow-pmullq | FileCheck %s --check-prefix=GENERIC-NOVLX
; ============================================================================
; Case 1: 52-bit Optimization (vpmadd52luq)
; ============================================================================
; Both operands are masked to provably narrow ranges: %a to 33 bits
; (8589934591 = 2^33-1) and %b to 19 bits (524287 = 2^19-1), so the full
; product fits in 52 bits. Every run configuration is expected to select
; the IFMA instruction vpmadd52luq with a zeroed accumulator instead of a
; general 64-bit multiply.
define <8 x i64> @test_mul_52bit_fits(<8 x i64> %a, <8 x i64> %b) {
; CNL-LABEL: test_mul_52bit_fits:
; CNL: # %bb.0:
; CNL-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm2
; CNL-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm1
; CNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CNL-NEXT: vpmadd52luq %zmm1, %zmm2, %zmm0
; CNL-NEXT: retq
;
; NOVLX-LABEL: test_mul_52bit_fits:
; NOVLX: # %bb.0:
; NOVLX-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm2
; NOVLX-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm1
; NOVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; NOVLX-NEXT: vpmadd52luq %zmm1, %zmm2, %zmm0
; NOVLX-NEXT: retq
;
; GENERIC-LABEL: test_mul_52bit_fits:
; GENERIC: # %bb.0:
; GENERIC-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm2
; GENERIC-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm1
; GENERIC-NEXT: vpxor %xmm0, %xmm0, %xmm0
; GENERIC-NEXT: vpmadd52luq %zmm1, %zmm2, %zmm0
; GENERIC-NEXT: retq
;
; GENERIC-NOVLX-LABEL: test_mul_52bit_fits:
; GENERIC-NOVLX: # %bb.0:
; GENERIC-NOVLX-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm2
; GENERIC-NOVLX-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm1
; GENERIC-NOVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; GENERIC-NOVLX-NEXT: vpmadd52luq %zmm1, %zmm2, %zmm0
; GENERIC-NOVLX-NEXT: retq
; 33-bit mask on %a, 19-bit mask on %b: 33 + 19 = 52 bits of product.
%a_masked = and <8 x i64> %a, splat (i64 8589934591)
%b_masked = and <8 x i64> %b, splat (i64 524287)
%res = mul <8 x i64> %a_masked, %b_masked
ret <8 x i64> %res
}
; ============================================================================
; Case 1.5: Non-constant test (using Logical Shift Right to clear high bits)
; ============================================================================
; Narrow-range operands established by shifts rather than constant masks:
; lshr by 31 leaves at most 33 significant bits in %a and lshr by 45 leaves
; at most 19 in %b, so the product fits in 52 bits and every configuration
; is expected to select vpmadd52luq with a zeroed accumulator, as in the
; masked case above.
define <8 x i64> @test_mul_shift_high_bits(<8 x i64> %a, <8 x i64> %b) {
; CNL-LABEL: test_mul_shift_high_bits:
; CNL: # %bb.0:
; CNL-NEXT: vpsrlq $31, %zmm0, %zmm2
; CNL-NEXT: vpsrlq $45, %zmm1, %zmm1
; CNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CNL-NEXT: vpmadd52luq %zmm1, %zmm2, %zmm0
; CNL-NEXT: retq
;
; NOVLX-LABEL: test_mul_shift_high_bits:
; NOVLX: # %bb.0:
; NOVLX-NEXT: vpsrlq $31, %zmm0, %zmm2
; NOVLX-NEXT: vpsrlq $45, %zmm1, %zmm1
; NOVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; NOVLX-NEXT: vpmadd52luq %zmm1, %zmm2, %zmm0
; NOVLX-NEXT: retq
;
; GENERIC-LABEL: test_mul_shift_high_bits:
; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrlq $31, %zmm0, %zmm2
; GENERIC-NEXT: vpsrlq $45, %zmm1, %zmm1
; GENERIC-NEXT: vpxor %xmm0, %xmm0, %xmm0
; GENERIC-NEXT: vpmadd52luq %zmm1, %zmm2, %zmm0
; GENERIC-NEXT: retq
;
; GENERIC-NOVLX-LABEL: test_mul_shift_high_bits:
; GENERIC-NOVLX: # %bb.0:
; GENERIC-NOVLX-NEXT: vpsrlq $31, %zmm0, %zmm2
; GENERIC-NOVLX-NEXT: vpsrlq $45, %zmm1, %zmm1
; GENERIC-NOVLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; GENERIC-NOVLX-NEXT: vpmadd52luq %zmm1, %zmm2, %zmm0
; GENERIC-NOVLX-NEXT: retq
; Use splat constant syntax for the shift amounts, consistent with the mask
; constants used by every other function in this file (identical semantics).
%a_shifted = lshr <8 x i64> %a, splat (i64 31)
%b_shifted = lshr <8 x i64> %b, splat (i64 45)
%res = mul <8 x i64> %a_shifted, %b_shifted
ret <8 x i64> %res
}
; ============================================================================
; Case 2: 32-bit Optimization (vpmuludq)
; ============================================================================
; Both operands are masked to 32 bits (4294967295 = 2^32-1), so the multiply
; is a plain unsigned 32x32->64 per lane. All configurations are expected to
; fold the masks away and emit a single vpmuludq.
define <8 x i64> @test_mul_32bit_fits(<8 x i64> %a, <8 x i64> %b) {
; CNL-LABEL: test_mul_32bit_fits:
; CNL: # %bb.0:
; CNL-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; CNL-NEXT: retq
;
; NOVLX-LABEL: test_mul_32bit_fits:
; NOVLX: # %bb.0:
; NOVLX-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; NOVLX-NEXT: retq
;
; GENERIC-LABEL: test_mul_32bit_fits:
; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; GENERIC-NEXT: retq
;
; GENERIC-NOVLX-LABEL: test_mul_32bit_fits:
; GENERIC-NOVLX: # %bb.0:
; GENERIC-NOVLX-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; GENERIC-NOVLX-NEXT: retq
%a_masked = and <8 x i64> %a, splat (i64 4294967295)
%b_masked = and <8 x i64> %b, splat (i64 4294967295)
%res = mul <8 x i64> %a_masked, %b_masked
ret <8 x i64> %res
}
; ============================================================================
; Case 3: No Optimization (Full 64-bit)
; ============================================================================
; Negative case: no range information on either operand, so no narrowing is
; possible and every configuration (including those with +slow-pmullq) still
; emits the full 64-bit vpmullq.
define <8 x i64> @test_mul_full_64bit(<8 x i64> %a, <8 x i64> %b) {
; CNL-LABEL: test_mul_full_64bit:
; CNL: # %bb.0:
; CNL-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; CNL-NEXT: retq
;
; NOVLX-LABEL: test_mul_full_64bit:
; NOVLX: # %bb.0:
; NOVLX-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; NOVLX-NEXT: retq
;
; GENERIC-LABEL: test_mul_full_64bit:
; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; GENERIC-NEXT: retq
;
; GENERIC-NOVLX-LABEL: test_mul_full_64bit:
; GENERIC-NOVLX: # %bb.0:
; GENERIC-NOVLX-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; GENERIC-NOVLX-NEXT: retq
%res = mul <8 x i64> %a, %b
ret <8 x i64> %res
}
; ============================================================================
; Case 4: Vector Width Variety (Check 256-bit / YMM)
; ============================================================================
; 256-bit variant of the 52-bit case (same 33-bit / 19-bit masks). With
; AVX512VL the vpmadd52luq ymm form is used directly; without VL the NOVLX
; runs instead mask in ymm, widen implicitly to zmm for vpmullq, and kill
; the upper half on return.
define <4 x i64> @test_mul_52bit_ymm(<4 x i64> %a, <4 x i64> %b) {
; CNL-LABEL: test_mul_52bit_ymm:
; CNL: # %bb.0:
; CNL-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm2
; CNL-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm1
; CNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CNL-NEXT: vpmadd52luq %ymm1, %ymm2, %ymm0
; CNL-NEXT: retq
;
; NOVLX-LABEL: test_mul_52bit_ymm:
; NOVLX: # %bb.0:
; NOVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [8589934591,8589934591,8589934591,8589934591]
; NOVLX-NEXT: vpand %ymm2, %ymm0, %ymm0
; NOVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [524287,524287,524287,524287]
; NOVLX-NEXT: vpand %ymm2, %ymm1, %ymm1
; NOVLX-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVLX-NEXT: retq
;
; GENERIC-LABEL: test_mul_52bit_ymm:
; GENERIC: # %bb.0:
; GENERIC-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm2
; GENERIC-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm1
; GENERIC-NEXT: vpxor %xmm0, %xmm0, %xmm0
; GENERIC-NEXT: vpmadd52luq %ymm1, %ymm2, %ymm0
; GENERIC-NEXT: retq
;
; GENERIC-NOVLX-LABEL: test_mul_52bit_ymm:
; GENERIC-NOVLX: # %bb.0:
; GENERIC-NOVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [8589934591,8589934591,8589934591,8589934591]
; GENERIC-NOVLX-NEXT: vpand %ymm2, %ymm0, %ymm0
; GENERIC-NOVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [524287,524287,524287,524287]
; GENERIC-NOVLX-NEXT: vpand %ymm2, %ymm1, %ymm1
; GENERIC-NOVLX-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; GENERIC-NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; GENERIC-NOVLX-NEXT: retq
%a_masked = and <4 x i64> %a, splat (i64 8589934591)
%b_masked = and <4 x i64> %b, splat (i64 524287)
%res = mul <4 x i64> %a_masked, %b_masked
ret <4 x i64> %res
}
; ============================================================================
; Case 5: 32-bit Signed Optimization (vpmuldq)
; ============================================================================
; Signed 32x32->64 multiply: i32 inputs are sign-extended to i64, then
; multiplied. Per the CHECK lines, the inputs are zero-extended into qword
; lanes (vpmovzxdq) and multiplied with vpmuldq, which reads only the low
; doubleword of each qword and sign-extends it — so the sext semantics are
; preserved without materializing the extended values.
define <8 x i64> @test_mul_32bit_signed(<8 x i32> %a, <8 x i32> %b) {
; CNL-LABEL: test_mul_32bit_signed:
; CNL: # %bb.0:
; CNL-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; CNL-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
; CNL-NEXT: vpmuldq %zmm1, %zmm0, %zmm0
; CNL-NEXT: retq
;
; NOVLX-LABEL: test_mul_32bit_signed:
; NOVLX: # %bb.0:
; NOVLX-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; NOVLX-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
; NOVLX-NEXT: vpmuldq %zmm1, %zmm0, %zmm0
; NOVLX-NEXT: retq
;
; GENERIC-LABEL: test_mul_32bit_signed:
; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; GENERIC-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
; GENERIC-NEXT: vpmuldq %zmm1, %zmm0, %zmm0
; GENERIC-NEXT: retq
;
; GENERIC-NOVLX-LABEL: test_mul_32bit_signed:
; GENERIC-NOVLX: # %bb.0:
; GENERIC-NOVLX-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; GENERIC-NOVLX-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
; GENERIC-NOVLX-NEXT: vpmuldq %zmm1, %zmm0, %zmm0
; GENERIC-NOVLX-NEXT: retq
%a_ = sext <8 x i32> %a to <8 x i64>
%b_ = sext <8 x i32> %b to <8 x i64>
%res = mul <8 x i64> %a_, %b_
ret <8 x i64> %res
}