; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=amdgcn -mcpu=tahiti -verify-machineinstrs | FileCheck %s -enable-var-scope -check-prefixes=GCN,SI
; RUN: llc < %s -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs | FileCheck %s -enable-var-scope -check-prefixes=GCN,VI

; Make sure a constant 0 for the high half isn't pointlessly materialized
define i16 @trunc_bitcast_i64_lshr_32_i16(i64 %bar) {
; GCN-LABEL: trunc_bitcast_i64_lshr_32_i16:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
  %srl = lshr i64 %bar, 32
  %trunc = trunc i64 %srl to i16
  ret i16 %trunc
}

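; Same, but truncating to i32: the result should just be a copy of the high 32-bit half.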
define i32 @trunc_bitcast_i64_lshr_32_i32(i64 %bar) {
; GCN-LABEL: trunc_bitcast_i64_lshr_32_i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
  %srl = lshr i64 %bar, 32
  %trunc = trunc i64 %srl to i32
  ret i32 %trunc
}

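; Truncating the bitcast of a build_vector whose high element is a constant only needs the low element.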
define i16 @trunc_bitcast_v2i32_to_i16(<2 x i32> %bar) {
; SI-LABEL: trunc_bitcast_v2i32_to_i16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 4, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: trunc_bitcast_v2i32_to_i16:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 4, v0
; VI-NEXT: s_setpc_b64 s[30:31]
  %load0 = load i32, ptr addrspace(1) undef
  %load1 = load i32, ptr addrspace(1) null
  %insert.0 = insertelement <2 x i32> undef, i32 %load0, i32 0
  %insert.1 = insertelement <2 x i32> %insert.0, i32 99, i32 1
  %bc = bitcast <2 x i32> %insert.1 to i64
  %trunc = trunc i64 %bc to i16
  %add = add i16 %trunc, 4
  ret i16 %add
}

; Make sure there's no crash if the source vector type is FP
define i16 @trunc_bitcast_v2f32_to_i16(<2 x float> %bar) {
; SI-LABEL: trunc_bitcast_v2f32_to_i16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 4, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: trunc_bitcast_v2f32_to_i16:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 4, v0
; VI-NEXT: s_setpc_b64 s[30:31]
  %load0 = load float, ptr addrspace(1) undef
  %load1 = load float, ptr addrspace(1) null
  %insert.0 = insertelement <2 x float> undef, float %load0, i32 0
  %insert.1 = insertelement <2 x float> %insert.0, float 4.0, i32 1
  %bc = bitcast <2 x float> %insert.1 to i64
  %trunc = trunc i64 %bc to i16
  %add = add i16 %trunc, 4
  ret i16 %add
}

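; The high lane of the stored <2 x i16> comes from an undef element, so a single scalar multiply and shift is enough.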
define amdgpu_kernel void @truncate_high_elt_extract_vector(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture readonly %arg1, ptr addrspace(1) nocapture %arg2) local_unnamed_addr {
; SI-LABEL: truncate_high_elt_extract_vector:
; SI: ; %bb.0: ; %bb
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s0, s[0:1], 0x0
; SI-NEXT: s_load_dword s1, s[2:3], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_sext_i32_i16 s0, s0
; SI-NEXT: s_sext_i32_i16 s1, s1
; SI-NEXT: s_mul_i32 s1, s1, s0
; SI-NEXT: s_lshr_b32 s0, s1, 16
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: truncate_high_elt_extract_vector:
; VI: ; %bb.0: ; %bb
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s0, s[0:1], 0x0
; VI-NEXT: s_load_dword s1, s[2:3], 0x0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_sext_i32_i16 s0, s0
; VI-NEXT: s_sext_i32_i16 s1, s1
; VI-NEXT: s_mul_i32 s1, s1, s0
; VI-NEXT: s_lshr_b32 s0, s1, 16
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
bb:
  %tmp = getelementptr inbounds <2 x i16>, ptr addrspace(1) %arg, i64 undef
  %tmp3 = load <2 x i16>, ptr addrspace(1) %tmp, align 4
  %tmp4 = getelementptr inbounds <2 x i16>, ptr addrspace(1) %arg1, i64 undef
  %tmp5 = load <2 x i16>, ptr addrspace(1) %tmp4, align 4
  %tmp6 = sext <2 x i16> %tmp3 to <2 x i32>
  %tmp7 = sext <2 x i16> %tmp5 to <2 x i32>
  %tmp8 = extractelement <2 x i32> %tmp6, i64 0
  %tmp9 = extractelement <2 x i32> %tmp7, i64 0
  %tmp10 = mul nsw i32 %tmp9, %tmp8
  %tmp11 = insertelement <2 x i32> undef, i32 %tmp10, i32 0
  %tmp12 = insertelement <2 x i32> %tmp11, i32 undef, i32 1
  %tmp13 = lshr <2 x i32> %tmp12, <i32 16, i32 16>
  %tmp14 = trunc <2 x i32> %tmp13 to <2 x i16>
  %tmp15 = getelementptr inbounds <2 x i16>, ptr addrspace(1) %arg2, i64 undef
  store <2 x i16> %tmp14, ptr addrspace(1) %tmp15, align 4
  ret void
}

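; Truncate both elements of a <2 x i64> argument and repack them into a <2 x i16>.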
define <2 x i16> @trunc_v2i64_arg_to_v2i16(<2 x i64> %arg0) #0 {
; SI-LABEL: trunc_v2i64_arg_to_v2i16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: trunc_v2i64_arg_to_v2i16:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_mov_b32 s4, 0x1000504
; VI-NEXT: v_perm_b32 v0, v0, v2, s4
; VI-NEXT: s_setpc_b64 s[30:31]
  %trunc = trunc <2 x i64> %arg0 to <2 x i16>
  ret <2 x i16> %trunc
}

; Test for regression where an unnecessary v_alignbit_b32 was inserted
; on the final result, due to losing the fact that the upper half of
; the lhs vector was undef.
define <2 x i16> @vector_trunc_high_bits_undef_lshr_lhs_alignbit_regression(i32 %arg0) {
; SI-LABEL: vector_trunc_high_bits_undef_lshr_lhs_alignbit_regression:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_lshr_lhs_alignbit_regression:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: s_setpc_b64 s[30:31]
  %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0
  %lshr = lshr <2 x i32> %undef.hi.elt, splat (i32 16)
  %trunc = trunc <2 x i32> %lshr to <2 x i16>
  ret <2 x i16> %trunc
}

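; The remaining variants repeat the same poison-high-lane pattern with other opcodes and operand orders; only the low lane of the result should be computed.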
define <2 x i16> @vector_trunc_high_bits_undef_lshr_rhs_alignbit_regression(i32 %arg0) {
; SI-LABEL: vector_trunc_high_bits_undef_lshr_rhs_alignbit_regression:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_lshr_b32_e32 v0, 16, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_lshr_rhs_alignbit_regression:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshrrev_b32_e64 v0, v0, 16
; VI-NEXT: s_setpc_b64 s[30:31]
  %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0
  %lshr = lshr <2 x i32> splat (i32 16), %undef.hi.elt
  %trunc = trunc <2 x i32> %lshr to <2 x i16>
  ret <2 x i16> %trunc
}

define <2 x i16> @vector_trunc_high_bits_undef_ashr_lhs_alignbit_regression(i32 %arg0) {
; SI-LABEL: vector_trunc_high_bits_undef_ashr_lhs_alignbit_regression:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_ashr_lhs_alignbit_regression:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: s_setpc_b64 s[30:31]
  %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0
  %ashr = ashr <2 x i32> %undef.hi.elt, splat (i32 16)
  %trunc = trunc <2 x i32> %ashr to <2 x i16>
  ret <2 x i16> %trunc
}

define <2 x i16> @vector_trunc_high_bits_undef_ashr_rhs_alignbit_regression(i32 %arg0) {
; SI-LABEL: vector_trunc_high_bits_undef_ashr_rhs_alignbit_regression:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_ashr_i32_e32 v0, -4, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_ashr_rhs_alignbit_regression:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_ashrrev_i32_e64 v0, v0, -4
; VI-NEXT: s_setpc_b64 s[30:31]
  %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0
  %ashr = ashr <2 x i32> splat (i32 -4), %undef.hi.elt
  %trunc = trunc <2 x i32> %ashr to <2 x i16>
  ret <2 x i16> %trunc
}

define <2 x i16> @vector_trunc_high_bits_undef_add_lhs_alignbit_regression(i32 %arg0) {
; SI-LABEL: vector_trunc_high_bits_undef_add_lhs_alignbit_regression:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 16, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_add_lhs_alignbit_regression:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: s_setpc_b64 s[30:31]
  %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0
  %add = add <2 x i32> %undef.hi.elt, splat (i32 16)
  %trunc = trunc <2 x i32> %add to <2 x i16>
  ret <2 x i16> %trunc
}

define <2 x i16> @vector_trunc_high_bits_undef_shl_rhs_alignbit_regression(i32 %arg0) {
; SI-LABEL: vector_trunc_high_bits_undef_shl_rhs_alignbit_regression:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_lshl_b32_e32 v0, 2, v0
; SI-NEXT: v_and_b32_e32 v0, 0xfffe, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_shl_rhs_alignbit_regression:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshlrev_b32_e64 v0, v0, 2
; VI-NEXT: s_setpc_b64 s[30:31]
  %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0
  %shl = shl <2 x i32> splat (i32 2), %undef.hi.elt
  %trunc = trunc <2 x i32> %shl to <2 x i16>
  ret <2 x i16> %trunc
}

define <2 x i16> @vector_trunc_high_bits_undef_sub_lhs_alignbit_regression(i32 %arg0) {
; SI-LABEL: vector_trunc_high_bits_undef_sub_lhs_alignbit_regression:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, -16, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_sub_lhs_alignbit_regression:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, -16, v0
; VI-NEXT: s_setpc_b64 s[30:31]
  %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0
  %sub = sub <2 x i32> %undef.hi.elt, splat (i32 16)
  %trunc = trunc <2 x i32> %sub to <2 x i16>
  ret <2 x i16> %trunc
}

define <2 x i16> @vector_trunc_high_bits_undef_or_lhs_alignbit_regression(i32 %arg0) {
; SI-LABEL: vector_trunc_high_bits_undef_or_lhs_alignbit_regression:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, 0xffff0011, v0
; SI-NEXT: v_mov_b32_e32 v1, 0xffff
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_or_lhs_alignbit_regression:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_or_b32_e32 v0, 0xffff0011, v0
; VI-NEXT: s_setpc_b64 s[30:31]
  %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0
  %or = or <2 x i32> %undef.hi.elt, splat (i32 17)
  %trunc = trunc <2 x i32> %or to <2 x i16>
  ret <2 x i16> %trunc
}

define <2 x i16> @vector_trunc_high_bits_undef_xor_lhs_alignbit_regression(i32 %arg0) {
; SI-LABEL: vector_trunc_high_bits_undef_xor_lhs_alignbit_regression:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_xor_b32_e32 v0, 17, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_xor_lhs_alignbit_regression:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_xor_b32_e32 v0, 17, v0
; VI-NEXT: s_setpc_b64 s[30:31]
  %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0
  %xor = xor <2 x i32> %undef.hi.elt, splat (i32 17)
  %trunc = trunc <2 x i32> %xor to <2 x i16>
  ret <2 x i16> %trunc
}

define <2 x i16> @vector_trunc_high_bits_undef_shl_lhs_alignbit_regression(i32 %arg0) {
; SI-LABEL: vector_trunc_high_bits_undef_shl_lhs_alignbit_regression:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_and_b32_e32 v0, 0xfffc, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_shl_lhs_alignbit_regression:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshlrev_b16_e32 v0, 2, v0
; VI-NEXT: s_setpc_b64 s[30:31]
  %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0
  %shl = shl <2 x i32> %undef.hi.elt, splat (i32 2)
  %trunc = trunc <2 x i32> %shl to <2 x i16>
  ret <2 x i16> %trunc
}

define <2 x i16> @vector_trunc_high_bits_undef_mul_lhs_alignbit_regression(i32 %arg0) {
; SI-LABEL: vector_trunc_high_bits_undef_mul_lhs_alignbit_regression:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mul_lo_u32 v0, v0, 18
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: v_and_b32_e32 v0, 0xfffe, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_mul_lhs_alignbit_regression:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mul_lo_u32 v0, v0, 18
; VI-NEXT: v_and_b32_e32 v0, 0xfffe, v0
; VI-NEXT: s_setpc_b64 s[30:31]
  %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0
  %mul = mul <2 x i32> %undef.hi.elt, splat (i32 18)
  %trunc = trunc <2 x i32> %mul to <2 x i16>
  ret <2 x i16> %trunc
}

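; The division and remainder variants below expand the divide by 18 into a magic-number multiply, but only for the low lane.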
define <2 x i16> @vector_trunc_high_bits_undef_sdiv_lhs_alignbit_regression(i32 %arg0) {
; SI-LABEL: vector_trunc_high_bits_undef_sdiv_lhs_alignbit_regression:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, 0x38e38e39
; SI-NEXT: v_mul_hi_i32 v0, v0, s4
; SI-NEXT: v_lshrrev_b32_e32 v1, 31, v0
; SI-NEXT: v_lshrrev_b32_e32 v0, 2, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_sdiv_lhs_alignbit_regression:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_mov_b32 s4, 0x38e38e39
; VI-NEXT: v_mul_hi_i32 v0, v0, s4
; VI-NEXT: v_lshrrev_b32_e32 v1, 31, v0
; VI-NEXT: v_ashrrev_i32_e32 v0, 2, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
; VI-NEXT: s_setpc_b64 s[30:31]
  %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0
  %sdiv = sdiv <2 x i32> %undef.hi.elt, splat (i32 18)
  %trunc = trunc <2 x i32> %sdiv to <2 x i16>
  ret <2 x i16> %trunc
}

define <2 x i16> @vector_trunc_high_bits_undef_srem_lhs_alignbit_regression(i32 %arg0) {
; SI-LABEL: vector_trunc_high_bits_undef_srem_lhs_alignbit_regression:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, 0x38e38e39
; SI-NEXT: v_mul_hi_i32 v1, v0, s4
; SI-NEXT: v_lshrrev_b32_e32 v2, 31, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 2, v1
; SI-NEXT: v_add_i32_e32 v1, vcc, v1, v2
; SI-NEXT: v_mul_lo_u32 v1, v1, 18
; SI-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_srem_lhs_alignbit_regression:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_mov_b32 s4, 0x38e38e39
; VI-NEXT: v_mul_hi_i32 v1, v0, s4
; VI-NEXT: v_lshrrev_b32_e32 v2, 31, v1
; VI-NEXT: v_ashrrev_i32_e32 v1, 2, v1
; VI-NEXT: v_add_u32_e32 v1, vcc, v1, v2
; VI-NEXT: v_mul_lo_u32 v1, v1, 18
; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v1
; VI-NEXT: s_setpc_b64 s[30:31]
  %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0
  %srem = srem <2 x i32> %undef.hi.elt, splat (i32 18)
  %trunc = trunc <2 x i32> %srem to <2 x i16>
  ret <2 x i16> %trunc
}

define <2 x i16> @vector_trunc_high_bits_undef_udiv_lhs_alignbit_regression(i32 %arg0) {
; SI-LABEL: vector_trunc_high_bits_undef_udiv_lhs_alignbit_regression:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, 0x38e38e39
; SI-NEXT: v_mul_hi_u32 v0, v0, s4
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: v_bfe_u32 v0, v0, 2, 16
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_udiv_lhs_alignbit_regression:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_mov_b32 s4, 0x38e38e39
; VI-NEXT: v_mul_hi_u32 v0, v0, s4
; VI-NEXT: v_lshrrev_b32_e32 v0, 2, v0
; VI-NEXT: s_setpc_b64 s[30:31]
  %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0
  %udiv = udiv <2 x i32> %undef.hi.elt, splat (i32 18)
  %trunc = trunc <2 x i32> %udiv to <2 x i16>
  ret <2 x i16> %trunc
}

define <2 x i16> @vector_trunc_high_bits_undef_urem_lhs_alignbit_regression(i32 %arg0) {
; SI-LABEL: vector_trunc_high_bits_undef_urem_lhs_alignbit_regression:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, 0x38e38e39
; SI-NEXT: v_mul_hi_u32 v1, v0, s4
; SI-NEXT: v_lshrrev_b32_e32 v1, 2, v1
; SI-NEXT: v_mul_lo_u32 v1, v1, 18
; SI-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_urem_lhs_alignbit_regression:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_mov_b32 s4, 0x38e38e39
; VI-NEXT: v_mul_hi_u32 v1, v0, s4
; VI-NEXT: v_lshrrev_b32_e32 v1, 2, v1
; VI-NEXT: v_mul_lo_u32 v1, v1, 18
; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v1
; VI-NEXT: s_setpc_b64 s[30:31]
  %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0
  %urem = urem <2 x i32> %undef.hi.elt, splat (i32 18)
  %trunc = trunc <2 x i32> %urem to <2 x i16>
  ret <2 x i16> %trunc
}