| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN %s |
| |
| define amdgpu_kernel void @float4_inselt(ptr addrspace(1) %out, <4 x float> %vec, i32 %sel) { |
| ; GCN-LABEL: float4_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dword s2, s[0:1], 0x44 |
| ; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x34 |
| ; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 3 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s7 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 2 |
| ; GCN-NEXT: v_cndmask_b32_e32 v3, 1.0, v0, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v0, s6 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 1 |
| ; GCN-NEXT: v_cndmask_b32_e32 v2, 1.0, v0, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v0, s5 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v1, 1.0, v0, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v0, s4 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s1 |
| ; GCN-NEXT: v_cndmask_b32_e32 v0, 1.0, v0, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v4, s0 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: s_endpgm |
| ; Variable-index insert of 1.0 into <4 x float>: per the checks above, this is |
| ; expanded into one s_cmp_lg_u32 + v_cndmask_b32 compare/select per element |
| ; before a single flat_store_dwordx4. |
| entry: |
| %v = insertelement <4 x float> %vec, float 1.000000e+00, i32 %sel |
| store <4 x float> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @float4_inselt_undef(ptr addrspace(1) %out, i32 %sel) { |
| ; GCN-LABEL: float4_inselt_undef: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 |
| ; GCN-NEXT: v_mov_b32_e32 v0, 1.0 |
| ; GCN-NEXT: v_mov_b32_e32 v1, v0 |
| ; GCN-NEXT: v_mov_b32_e32 v2, v0 |
| ; GCN-NEXT: v_mov_b32_e32 v3, v0 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: v_mov_b32_e32 v5, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s0 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: s_endpgm |
| ; Insert into an undef vector: per the checks, the whole insert folds away and |
| ; all four lanes are simply materialized as 1.0 (no compares or selects). |
| entry: |
| %v = insertelement <4 x float> undef, float 1.000000e+00, i32 %sel |
| store <4 x float> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @int4_inselt(ptr addrspace(1) %out, <4 x i32> %vec, i32 %sel) { |
| ; GCN-LABEL: int4_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dword s2, s[0:1], 0x44 |
| ; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x34 |
| ; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 3 |
| ; GCN-NEXT: s_cselect_b32 s3, s7, 1 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 2 |
| ; GCN-NEXT: s_cselect_b32 s6, s6, 1 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 1 |
| ; GCN-NEXT: s_cselect_b32 s5, s5, 1 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 0 |
| ; GCN-NEXT: s_cselect_b32 s2, s4, 1 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s2 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s5 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s6 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s0 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: s_endpgm |
| ; Integer variant of float4_inselt: per the checks, the selects stay entirely |
| ; on the scalar unit (s_cselect_b32) instead of using v_cndmask. |
| entry: |
| %v = insertelement <4 x i32> %vec, i32 1, i32 %sel |
| store <4 x i32> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @float2_inselt(ptr addrspace(1) %out, <2 x float> %vec, i32 %sel) { |
| ; GCN-LABEL: float2_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dword s4, s[0:1], 0x34 |
| ; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_cmp_lg_u32 s4, 1 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s4, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v1, 1.0, v0, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v0, s2 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s1 |
| ; GCN-NEXT: v_cndmask_b32_e32 v0, 1.0, v0, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v2, s0 |
| ; GCN-NEXT: flat_store_dwordx2 v[2:3], v[0:1] |
| ; GCN-NEXT: s_endpgm |
| ; Two-element variant: same s_cmp + v_cndmask expansion as float4_inselt, |
| ; with a flat_store_dwordx2 at the end. |
| entry: |
| %v = insertelement <2 x float> %vec, float 1.000000e+00, i32 %sel |
| store <2 x float> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @float8_inselt(ptr addrspace(1) %out, <8 x float> %vec, i32 %sel) { |
| ; GCN-LABEL: float8_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x44 |
| ; GCN-NEXT: s_load_dword s2, s[0:1], 0x64 |
| ; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: v_mov_b32_e32 v0, s4 |
| ; GCN-NEXT: s_mov_b32 m0, s2 |
| ; GCN-NEXT: s_add_u32 s2, s0, 16 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s5 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s6 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s7 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s8 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s9 |
| ; GCN-NEXT: v_mov_b32_e32 v6, s10 |
| ; GCN-NEXT: v_mov_b32_e32 v7, s11 |
| ; GCN-NEXT: v_mov_b32_e32 v9, s3 |
| ; GCN-NEXT: v_movreld_b32_e32 v0, 1.0 |
| ; GCN-NEXT: v_mov_b32_e32 v8, s2 |
| ; GCN-NEXT: flat_store_dwordx4 v[8:9], v[4:7] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s0 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: s_endpgm |
| ; At 8 elements the per-element select chain is abandoned: per the checks, the |
| ; index goes into m0 and a single v_movreld_b32 writes 1.0 into the selected |
| ; lane of the v0-v7 register block. |
| entry: |
| %v = insertelement <8 x float> %vec, float 1.000000e+00, i32 %sel |
| store <8 x float> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @float16_inselt(ptr addrspace(1) %out, <16 x float> %vec, i32 %sel) { |
| ; GCN-LABEL: float16_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dwordx16 s[4:19], s[0:1], 0x64 |
| ; GCN-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 |
| ; GCN-NEXT: s_load_dword s20, s[0:1], 0xa4 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: v_mov_b32_e32 v0, s4 |
| ; GCN-NEXT: s_add_u32 s0, s2, 48 |
| ; GCN-NEXT: s_addc_u32 s1, s3, 0 |
| ; GCN-NEXT: v_mov_b32_e32 v17, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s5 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s6 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s7 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s8 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s9 |
| ; GCN-NEXT: v_mov_b32_e32 v6, s10 |
| ; GCN-NEXT: v_mov_b32_e32 v7, s11 |
| ; GCN-NEXT: v_mov_b32_e32 v8, s12 |
| ; GCN-NEXT: v_mov_b32_e32 v9, s13 |
| ; GCN-NEXT: v_mov_b32_e32 v10, s14 |
| ; GCN-NEXT: v_mov_b32_e32 v11, s15 |
| ; GCN-NEXT: v_mov_b32_e32 v12, s16 |
| ; GCN-NEXT: v_mov_b32_e32 v13, s17 |
| ; GCN-NEXT: v_mov_b32_e32 v14, s18 |
| ; GCN-NEXT: v_mov_b32_e32 v15, s19 |
| ; GCN-NEXT: s_mov_b32 m0, s20 |
| ; GCN-NEXT: v_mov_b32_e32 v16, s0 |
| ; GCN-NEXT: s_add_u32 s0, s2, 32 |
| ; GCN-NEXT: v_movreld_b32_e32 v0, 1.0 |
| ; GCN-NEXT: s_addc_u32 s1, s3, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[16:17], v[12:15] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v13, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v12, s0 |
| ; GCN-NEXT: s_add_u32 s0, s2, 16 |
| ; GCN-NEXT: s_addc_u32 s1, s3, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[12:13], v[8:11] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v9, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v8, s0 |
| ; GCN-NEXT: flat_store_dwordx4 v[8:9], v[4:7] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s2 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: s_endpgm |
| ; 16-element variant: same m0 + v_movreld_b32 indirect-write strategy as |
| ; float8_inselt, followed by four flat_store_dwordx4 stores. |
| entry: |
| %v = insertelement <16 x float> %vec, float 1.000000e+00, i32 %sel |
| store <16 x float> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @float32_inselt(ptr addrspace(1) %out, <32 x float> %vec, i32 %sel) { |
| ; GCN-LABEL: float32_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dwordx16 s[36:51], s[0:1], 0xa4 |
| ; GCN-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 |
| ; GCN-NEXT: s_load_dwordx16 s[4:19], s[0:1], 0xe4 |
| ; GCN-NEXT: s_load_dword s0, s[0:1], 0x124 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: v_mov_b32_e32 v0, s36 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s37 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s38 |
| ; GCN-NEXT: s_mov_b32 m0, s0 |
| ; GCN-NEXT: s_add_u32 s0, s2, 0x70 |
| ; GCN-NEXT: s_addc_u32 s1, s3, 0 |
| ; GCN-NEXT: v_mov_b32_e32 v33, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s39 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s40 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s41 |
| ; GCN-NEXT: v_mov_b32_e32 v6, s42 |
| ; GCN-NEXT: v_mov_b32_e32 v7, s43 |
| ; GCN-NEXT: v_mov_b32_e32 v8, s44 |
| ; GCN-NEXT: v_mov_b32_e32 v9, s45 |
| ; GCN-NEXT: v_mov_b32_e32 v10, s46 |
| ; GCN-NEXT: v_mov_b32_e32 v11, s47 |
| ; GCN-NEXT: v_mov_b32_e32 v12, s48 |
| ; GCN-NEXT: v_mov_b32_e32 v13, s49 |
| ; GCN-NEXT: v_mov_b32_e32 v14, s50 |
| ; GCN-NEXT: v_mov_b32_e32 v15, s51 |
| ; GCN-NEXT: v_mov_b32_e32 v16, s4 |
| ; GCN-NEXT: v_mov_b32_e32 v17, s5 |
| ; GCN-NEXT: v_mov_b32_e32 v18, s6 |
| ; GCN-NEXT: v_mov_b32_e32 v19, s7 |
| ; GCN-NEXT: v_mov_b32_e32 v20, s8 |
| ; GCN-NEXT: v_mov_b32_e32 v21, s9 |
| ; GCN-NEXT: v_mov_b32_e32 v22, s10 |
| ; GCN-NEXT: v_mov_b32_e32 v23, s11 |
| ; GCN-NEXT: v_mov_b32_e32 v24, s12 |
| ; GCN-NEXT: v_mov_b32_e32 v25, s13 |
| ; GCN-NEXT: v_mov_b32_e32 v26, s14 |
| ; GCN-NEXT: v_mov_b32_e32 v27, s15 |
| ; GCN-NEXT: v_mov_b32_e32 v28, s16 |
| ; GCN-NEXT: v_mov_b32_e32 v29, s17 |
| ; GCN-NEXT: v_mov_b32_e32 v30, s18 |
| ; GCN-NEXT: v_mov_b32_e32 v31, s19 |
| ; GCN-NEXT: v_mov_b32_e32 v32, s0 |
| ; GCN-NEXT: s_add_u32 s0, s2, 0x60 |
| ; GCN-NEXT: v_movreld_b32_e32 v0, 1.0 |
| ; GCN-NEXT: s_addc_u32 s1, s3, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[32:33], v[28:31] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v29, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v28, s0 |
| ; GCN-NEXT: s_add_u32 s0, s2, 0x50 |
| ; GCN-NEXT: s_addc_u32 s1, s3, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[28:29], v[24:27] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v25, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v24, s0 |
| ; GCN-NEXT: s_add_u32 s0, s2, 64 |
| ; GCN-NEXT: s_addc_u32 s1, s3, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[24:25], v[20:23] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v21, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v20, s0 |
| ; GCN-NEXT: s_add_u32 s0, s2, 48 |
| ; GCN-NEXT: s_addc_u32 s1, s3, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[20:21], v[16:19] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v17, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v16, s0 |
| ; GCN-NEXT: s_add_u32 s0, s2, 32 |
| ; GCN-NEXT: s_addc_u32 s1, s3, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[16:17], v[12:15] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v13, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v12, s0 |
| ; GCN-NEXT: s_add_u32 s0, s2, 16 |
| ; GCN-NEXT: s_addc_u32 s1, s3, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[12:13], v[8:11] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v9, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v8, s0 |
| ; GCN-NEXT: flat_store_dwordx4 v[8:9], v[4:7] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s2 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: s_endpgm |
| ; 32-element variant: single m0 + v_movreld_b32 write into the v0-v31 block, |
| ; then eight flat_store_dwordx4 stores at ascending offsets. |
| entry: |
| %v = insertelement <32 x float> %vec, float 1.000000e+00, i32 %sel |
| store <32 x float> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @half4_inselt(ptr addrspace(1) %out, <4 x half> %vec, i32 %sel) { |
| ; GCN-LABEL: half4_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dword s6, s[0:1], 0x34 |
| ; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 |
| ; GCN-NEXT: s_mov_b32 s4, 0x3c003c00 |
| ; GCN-NEXT: s_mov_b32 s5, s4 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_lshl_b32 s6, s6, 4 |
| ; GCN-NEXT: s_lshl_b64 s[6:7], 0xffff, s6 |
| ; GCN-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7] |
| ; GCN-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5] |
| ; GCN-NEXT: s_or_b64 s[2:3], s[4:5], s[2:3] |
| ; GCN-NEXT: v_mov_b32_e32 v0, s0 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s2 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s3 |
| ; GCN-NEXT: flat_store_dwordx2 v[0:1], v[2:3] |
| ; GCN-NEXT: s_endpgm |
| ; Sub-dword insert via bit masking: the index is scaled to a bit position |
| ; (sel*16), a 0xffff lane mask is shifted into place, and the vector is |
| ; merged with the 0x3c003c00 splat (half 1.0 in both halves of each dword). |
| entry: |
| %v = insertelement <4 x half> %vec, half 1.000000e+00, i32 %sel |
| store <4 x half> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @half2_inselt(ptr addrspace(1) %out, <2 x half> %vec, i32 %sel) { |
| ; GCN-LABEL: half2_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_lshl_b32 s3, s3, 4 |
| ; GCN-NEXT: s_lshl_b32 s3, 0xffff, s3 |
| ; GCN-NEXT: s_andn2_b32 s2, s2, s3 |
| ; GCN-NEXT: s_and_b32 s3, s3, 0x3c003c00 |
| ; GCN-NEXT: s_or_b32 s2, s3, s2 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s0 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s2 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s2 |
| ; Single-dword version of the half4_inselt masking sequence: 32-bit shift, |
| ; andn2/and/or against the 0x3c003c00 (half 1.0) splat. |
| entry: |
| %v = insertelement <2 x half> %vec, half 1.000000e+00, i32 %sel |
| store <2 x half> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @half8_inselt(ptr addrspace(1) %out, <8 x half> %vec, i32 %sel) { |
| ; GCN-LABEL: half8_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x34 |
| ; GCN-NEXT: s_load_dword s2, s[0:1], 0x44 |
| ; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 |
| ; GCN-NEXT: v_mov_b32_e32 v0, 0x3c00 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_lshr_b32 s3, s7, 16 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 7 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 6 |
| ; GCN-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v2, s7 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_lshr_b32 s3, s6, 16 |
| ; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v1 |
| ; GCN-NEXT: v_cndmask_b32_e32 v2, v0, v2, vcc |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 5 |
| ; GCN-NEXT: v_or_b32_sdwa v3, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v1, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v2, s6 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_lshr_b32 s3, s5, 16 |
| ; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v1 |
| ; GCN-NEXT: v_cndmask_b32_e32 v2, v0, v2, vcc |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 3 |
| ; GCN-NEXT: v_or_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v1, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 2 |
| ; GCN-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v4, s5 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_lshr_b32 s3, s4, 16 |
| ; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v1 |
| ; GCN-NEXT: v_cndmask_b32_e32 v4, v0, v4, vcc |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 1 |
| ; GCN-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v4, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v4, v0, v4, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v5, s4 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_lshlrev_b32_e32 v4, 16, v4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc |
| ; GCN-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v5, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s0 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: s_endpgm |
| ; Eight halves: per the checks, each 16-bit lane is unpacked (s_lshr), |
| ; selected against the 0x3c00 (half 1.0) constant with v_cndmask, and the |
| ; lane pairs are repacked with shift + v_or_b32_sdwa before one store. |
| entry: |
| %v = insertelement <8 x half> %vec, half 1.000000e+00, i32 %sel |
| store <8 x half> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @short2_inselt(ptr addrspace(1) %out, <2 x i16> %vec, i32 %sel) { |
| ; GCN-LABEL: short2_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_lshl_b32 s3, s3, 4 |
| ; GCN-NEXT: s_lshl_b32 s3, 0xffff, s3 |
| ; GCN-NEXT: s_andn2_b32 s2, s2, s3 |
| ; GCN-NEXT: s_and_b32 s3, s3, 0x10001 |
| ; GCN-NEXT: s_or_b32 s2, s3, s2 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s0 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s2 |
| ; GCN-NEXT: flat_store_dword v[0:1], v2 |
| ; GCN-NEXT: s_endpgm |
| ; i16 analogue of half2_inselt: same 0xffff mask-shift sequence, merging |
| ; against the 0x10001 splat (i16 value 1 in both halves). |
| entry: |
| %v = insertelement <2 x i16> %vec, i16 1, i32 %sel |
| store <2 x i16> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @short4_inselt(ptr addrspace(1) %out, <4 x i16> %vec, i32 %sel) { |
| ; GCN-LABEL: short4_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dword s6, s[0:1], 0x34 |
| ; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 |
| ; GCN-NEXT: s_mov_b32 s4, 0x10001 |
| ; GCN-NEXT: s_mov_b32 s5, s4 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_lshl_b32 s6, s6, 4 |
| ; GCN-NEXT: s_lshl_b64 s[6:7], 0xffff, s6 |
| ; GCN-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7] |
| ; GCN-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5] |
| ; GCN-NEXT: s_or_b64 s[2:3], s[4:5], s[2:3] |
| ; GCN-NEXT: v_mov_b32_e32 v0, s0 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s2 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s3 |
| ; GCN-NEXT: flat_store_dwordx2 v[0:1], v[2:3] |
| ; GCN-NEXT: s_endpgm |
| ; i16 analogue of half4_inselt: 64-bit mask shift merged against the |
| ; 0x10001 splat pair. |
| entry: |
| %v = insertelement <4 x i16> %vec, i16 1, i32 %sel |
| store <4 x i16> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @byte8_inselt(ptr addrspace(1) %out, <8 x i8> %vec, i32 %sel) { |
| ; GCN-LABEL: byte8_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dword s4, s[0:1], 0x34 |
| ; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_lshl_b32 s4, s4, 3 |
| ; GCN-NEXT: s_lshl_b64 s[4:5], 0xff, s4 |
| ; GCN-NEXT: s_and_b32 s7, s5, 0x1010101 |
| ; GCN-NEXT: s_and_b32 s6, s4, 0x1010101 |
| ; GCN-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5] |
| ; GCN-NEXT: s_or_b64 s[2:3], s[6:7], s[2:3] |
| ; GCN-NEXT: v_mov_b32_e32 v3, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s2 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s0 |
| ; GCN-NEXT: flat_store_dwordx2 v[2:3], v[0:1] |
| ; GCN-NEXT: s_endpgm |
| ; Byte insert via masking: index scaled by 8 bits (s_lshl 3), 0xff lane mask |
| ; shifted into a 64-bit pair, merged against the 0x1010101 (i8 value 1) splat. |
| entry: |
| %v = insertelement <8 x i8> %vec, i8 1, i32 %sel |
| store <8 x i8> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @byte16_inselt(ptr addrspace(1) %out, <16 x i8> %vec, i32 %sel) { |
| ; GCN-LABEL: byte16_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x34 |
| ; GCN-NEXT: s_load_dword s2, s[0:1], 0x44 |
| ; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_lshr_b32 s3, s7, 24 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 15 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_lshr_b32 s3, s7, 16 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 14 |
| ; GCN-NEXT: v_cndmask_b32_e32 v0, 1, v0, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v1, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_lshr_b32 s3, s7, 8 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v0, 8, v0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v1, 1, v1, vcc |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 13 |
| ; GCN-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v1, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 12 |
| ; GCN-NEXT: v_cndmask_b32_e32 v1, 1, v1, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v2, s7 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v1, 8, v1 |
| ; GCN-NEXT: v_cndmask_b32_e32 v2, 1, v2, vcc |
| ; GCN-NEXT: s_lshr_b32 s3, s6, 24 |
| ; GCN-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 11 |
| ; GCN-NEXT: v_or_b32_sdwa v3, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v0, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_lshr_b32 s3, s6, 16 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 10 |
| ; GCN-NEXT: v_cndmask_b32_e32 v0, 1, v0, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v1, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_lshr_b32 s3, s6, 8 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v0, 8, v0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v1, 1, v1, vcc |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 9 |
| ; GCN-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v1, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 8 |
| ; GCN-NEXT: v_cndmask_b32_e32 v1, 1, v1, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v2, s6 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v1, 8, v1 |
| ; GCN-NEXT: v_cndmask_b32_e32 v2, 1, v2, vcc |
| ; GCN-NEXT: s_lshr_b32 s3, s5, 24 |
| ; GCN-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 7 |
| ; GCN-NEXT: v_or_b32_sdwa v2, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v0, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_lshr_b32 s3, s5, 16 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 6 |
| ; GCN-NEXT: v_cndmask_b32_e32 v0, 1, v0, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v1, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_lshr_b32 s3, s5, 8 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v0, 8, v0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v1, 1, v1, vcc |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 5 |
| ; GCN-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v1, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v1, 1, v1, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v4, s5 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v1, 8, v1 |
| ; GCN-NEXT: v_cndmask_b32_e32 v4, 1, v4, vcc |
| ; GCN-NEXT: s_lshr_b32 s3, s4, 24 |
| ; GCN-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 3 |
| ; GCN-NEXT: v_or_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v0, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_lshr_b32 s3, s4, 16 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 2 |
| ; GCN-NEXT: v_cndmask_b32_e32 v0, 1, v0, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v4, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_lshr_b32 s3, s4, 8 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v0, 8, v0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v4, 1, v4, vcc |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 1 |
| ; GCN-NEXT: v_or_b32_sdwa v0, v4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v4, s3 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s2, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v4, 1, v4, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v5, s4 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v4, 8, v4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v5, 1, v5, vcc |
| ; GCN-NEXT: v_or_b32_sdwa v4, v5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: v_or_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v5, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s0 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: s_endpgm |
| ; Sixteen bytes: per the checks, each byte lane is unpacked (s_lshr), |
| ; selected against constant 1 with v_cndmask, and repacked with |
| ; v_lshlrev_b16 + v_or_b32_sdwa into four dwords before a single store. |
| entry: |
| %v = insertelement <16 x i8> %vec, i8 1, i32 %sel |
| store <16 x i8> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @double2_inselt(ptr addrspace(1) %out, <2 x double> %vec, i32 %sel) { |
| ; GCN-LABEL: double2_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dword s2, s[0:1], 0x44 |
| ; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x34 |
| ; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_cmp_eq_u32 s2, 1 |
| ; GCN-NEXT: s_cselect_b32 s3, 0x3ff00000, s7 |
| ; GCN-NEXT: s_cselect_b32 s6, 0, s6 |
| ; GCN-NEXT: s_cmp_eq_u32 s2, 0 |
| ; GCN-NEXT: s_cselect_b32 s2, 0x3ff00000, s5 |
| ; GCN-NEXT: s_cselect_b32 s4, 0, s4 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s4 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s2 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s6 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s0 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: s_endpgm |
| ; 64-bit elements are handled as dword pairs: per the checks, each s_cmp_eq |
| ; drives two s_cselects picking 0x3ff00000 (hi half of double 1.0) and 0 (lo). |
| entry: |
| %v = insertelement <2 x double> %vec, double 1.000000e+00, i32 %sel |
| store <2 x double> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @double5_inselt(ptr addrspace(1) %out, <5 x double> %vec, i32 %sel) { |
| ; GCN-LABEL: double5_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dword s12, s[0:1], 0xa4 |
| ; GCN-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x84 |
| ; GCN-NEXT: s_load_dwordx2 s[10:11], s[0:1], 0x24 |
| ; GCN-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x64 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_cmp_eq_u32 s12, 4 |
| ; GCN-NEXT: s_cselect_b32 s9, 0x3ff00000, s9 |
| ; GCN-NEXT: s_cselect_b32 s8, 0, s8 |
| ; GCN-NEXT: s_cmp_eq_u32 s12, 1 |
| ; GCN-NEXT: s_cselect_b32 s3, 0x3ff00000, s3 |
| ; GCN-NEXT: s_cselect_b32 s2, 0, s2 |
| ; GCN-NEXT: s_cmp_eq_u32 s12, 0 |
| ; GCN-NEXT: s_cselect_b32 s13, 0x3ff00000, s1 |
| ; GCN-NEXT: s_cselect_b32 s14, 0, s0 |
| ; GCN-NEXT: s_cmp_eq_u32 s12, 3 |
| ; GCN-NEXT: s_cselect_b32 s0, 0x3ff00000, s7 |
| ; GCN-NEXT: s_cselect_b32 s1, 0, s6 |
| ; GCN-NEXT: s_cmp_eq_u32 s12, 2 |
| ; GCN-NEXT: s_cselect_b32 s5, 0x3ff00000, s5 |
| ; GCN-NEXT: s_cselect_b32 s4, 0, s4 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s0 |
| ; GCN-NEXT: s_add_u32 s0, s10, 16 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s1 |
| ; GCN-NEXT: s_addc_u32 s1, s11, 0 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s4 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s5 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s0 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: v_mov_b32_e32 v4, s10 |
| ; GCN-NEXT: s_add_u32 s0, s10, 32 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s14 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s13 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s2 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s11 |
| ; GCN-NEXT: s_addc_u32 s1, s11, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s8 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s9 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s0 |
| ; GCN-NEXT: flat_store_dwordx2 v[2:3], v[0:1] |
| ; GCN-NEXT: s_endpgm |
| ; Odd (non-power-of-two) element count: per the checks, all five doubles get |
| ; the scalar s_cselect pair treatment and the result is written with two |
| ; dwordx4 stores plus a final dwordx2 for the fifth element. |
| entry: |
| %v = insertelement <5 x double> %vec, double 1.000000e+00, i32 %sel |
| store <5 x double> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @double8_inselt(ptr addrspace(1) %out, <8 x double> %vec, i32 %sel) { |
| ; GCN-LABEL: double8_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dword s2, s[0:1], 0xa4 |
| ; GCN-NEXT: s_load_dwordx16 s[4:19], s[0:1], 0x64 |
| ; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 |
| ; GCN-NEXT: v_mov_b32_e32 v16, 0x3ff00000 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_lshl_b32 s2, s2, 1 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s4 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s5 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s6 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s7 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s8 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s9 |
| ; GCN-NEXT: v_mov_b32_e32 v6, s10 |
| ; GCN-NEXT: v_mov_b32_e32 v7, s11 |
| ; GCN-NEXT: v_mov_b32_e32 v8, s12 |
| ; GCN-NEXT: v_mov_b32_e32 v9, s13 |
| ; GCN-NEXT: v_mov_b32_e32 v10, s14 |
| ; GCN-NEXT: v_mov_b32_e32 v11, s15 |
| ; GCN-NEXT: v_mov_b32_e32 v12, s16 |
| ; GCN-NEXT: v_mov_b32_e32 v13, s17 |
| ; GCN-NEXT: v_mov_b32_e32 v14, s18 |
| ; GCN-NEXT: v_mov_b32_e32 v15, s19 |
| ; GCN-NEXT: s_mov_b32 m0, s2 |
| ; GCN-NEXT: s_add_u32 s2, s0, 48 |
| ; GCN-NEXT: v_movreld_b32_e32 v0, 0 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: v_movreld_b32_e32 v1, v16 |
| ; GCN-NEXT: v_mov_b32_e32 v17, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v16, s2 |
| ; GCN-NEXT: s_add_u32 s2, s0, 32 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[16:17], v[12:15] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v13, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v12, s2 |
| ; GCN-NEXT: s_add_u32 s2, s0, 16 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[12:13], v[8:11] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v9, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v8, s2 |
| ; GCN-NEXT: flat_store_dwordx4 v[8:9], v[4:7] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s0 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: s_endpgm |
| ; Double insert via indirect register write: the index is doubled (s_lshl 1) |
| ; into m0, then two v_movreld_b32 writes set the lo dword to 0 and the hi |
| ; dword to 0x3ff00000 (together double 1.0). |
| entry: |
| %v = insertelement <8 x double> %vec, double 1.000000e+00, i32 %sel |
| store <8 x double> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @double7_inselt(ptr addrspace(1) %out, <7 x double> %vec, i32 %sel) { |
| ; GCN-LABEL: double7_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x64 |
| ; GCN-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 |
| ; GCN-NEXT: s_load_dwordx2 s[16:17], s[0:1], 0x94 |
| ; GCN-NEXT: s_load_dwordx4 s[12:15], s[0:1], 0x84 |
| ; GCN-NEXT: s_load_dword s0, s[0:1], 0xa4 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: v_mov_b32_e32 v0, s4 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s5 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s6 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s7 |
| ; GCN-NEXT: s_lshl_b32 s0, s0, 1 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s8 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s9 |
| ; GCN-NEXT: v_mov_b32_e32 v6, s10 |
| ; GCN-NEXT: v_mov_b32_e32 v7, s11 |
| ; GCN-NEXT: v_mov_b32_e32 v8, s12 |
| ; GCN-NEXT: v_mov_b32_e32 v9, s13 |
| ; GCN-NEXT: v_mov_b32_e32 v10, s14 |
| ; GCN-NEXT: v_mov_b32_e32 v11, s15 |
| ; GCN-NEXT: v_mov_b32_e32 v12, s16 |
| ; GCN-NEXT: v_mov_b32_e32 v13, s17 |
| ; GCN-NEXT: s_mov_b32 m0, s0 |
| ; GCN-NEXT: v_movreld_b32_e32 v0, 0 |
| ; GCN-NEXT: v_mov_b32_e32 v16, 0x3ff00000 |
| ; GCN-NEXT: s_add_u32 s0, s2, 16 |
| ; GCN-NEXT: v_movreld_b32_e32 v1, v16 |
| ; GCN-NEXT: s_addc_u32 s1, s3, 0 |
| ; GCN-NEXT: v_mov_b32_e32 v15, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v14, s0 |
| ; GCN-NEXT: flat_store_dwordx4 v[14:15], v[4:7] |
| ; GCN-NEXT: s_add_u32 s0, s2, 48 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s2 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: s_addc_u32 s1, s3, 0 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s0 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s1 |
| ; GCN-NEXT: s_add_u32 s0, s2, 32 |
| ; GCN-NEXT: flat_store_dwordx2 v[0:1], v[12:13] |
| ; GCN-NEXT: s_addc_u32 s1, s3, 0 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s0 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s1 |
| ; GCN-NEXT: flat_store_dwordx4 v[0:1], v[8:11] |
| ; GCN-NEXT: s_endpgm |
| ; Odd count with the movreld strategy: same paired v_movreld_b32 (0 then |
| ; 0x3ff00000) as double8_inselt, stored as three dwordx4 plus one dwordx2. |
| entry: |
| %v = insertelement <7 x double> %vec, double 1.000000e+00, i32 %sel |
| store <7 x double> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @double16_inselt(ptr addrspace(1) %out, <16 x double> %vec, i32 %sel) { |
| ; GCN-LABEL: double16_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dword s2, s[0:1], 0x124 |
| ; GCN-NEXT: s_load_dwordx16 s[36:51], s[0:1], 0xa4 |
| ; GCN-NEXT: s_load_dwordx16 s[4:19], s[0:1], 0xe4 |
| ; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 |
| ; GCN-NEXT: v_mov_b32_e32 v32, 0x3ff00000 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: v_mov_b32_e32 v0, s36 |
| ; GCN-NEXT: s_lshl_b32 s2, s2, 1 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s37 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s38 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s39 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s40 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s41 |
| ; GCN-NEXT: v_mov_b32_e32 v6, s42 |
| ; GCN-NEXT: v_mov_b32_e32 v7, s43 |
| ; GCN-NEXT: v_mov_b32_e32 v8, s44 |
| ; GCN-NEXT: v_mov_b32_e32 v9, s45 |
| ; GCN-NEXT: v_mov_b32_e32 v10, s46 |
| ; GCN-NEXT: v_mov_b32_e32 v11, s47 |
| ; GCN-NEXT: v_mov_b32_e32 v12, s48 |
| ; GCN-NEXT: v_mov_b32_e32 v13, s49 |
| ; GCN-NEXT: v_mov_b32_e32 v14, s50 |
| ; GCN-NEXT: v_mov_b32_e32 v15, s51 |
| ; GCN-NEXT: v_mov_b32_e32 v16, s4 |
| ; GCN-NEXT: v_mov_b32_e32 v17, s5 |
| ; GCN-NEXT: v_mov_b32_e32 v18, s6 |
| ; GCN-NEXT: v_mov_b32_e32 v19, s7 |
| ; GCN-NEXT: v_mov_b32_e32 v20, s8 |
| ; GCN-NEXT: v_mov_b32_e32 v21, s9 |
| ; GCN-NEXT: v_mov_b32_e32 v22, s10 |
| ; GCN-NEXT: v_mov_b32_e32 v23, s11 |
| ; GCN-NEXT: v_mov_b32_e32 v24, s12 |
| ; GCN-NEXT: v_mov_b32_e32 v25, s13 |
| ; GCN-NEXT: v_mov_b32_e32 v26, s14 |
| ; GCN-NEXT: v_mov_b32_e32 v27, s15 |
| ; GCN-NEXT: v_mov_b32_e32 v28, s16 |
| ; GCN-NEXT: v_mov_b32_e32 v29, s17 |
| ; GCN-NEXT: v_mov_b32_e32 v30, s18 |
| ; GCN-NEXT: v_mov_b32_e32 v31, s19 |
| ; GCN-NEXT: s_mov_b32 m0, s2 |
| ; GCN-NEXT: s_add_u32 s2, s0, 0x70 |
| ; GCN-NEXT: v_movreld_b32_e32 v0, 0 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: v_movreld_b32_e32 v1, v32 |
| ; GCN-NEXT: v_mov_b32_e32 v33, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v32, s2 |
| ; GCN-NEXT: s_add_u32 s2, s0, 0x60 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[32:33], v[28:31] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v29, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v28, s2 |
| ; GCN-NEXT: s_add_u32 s2, s0, 0x50 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[28:29], v[24:27] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v25, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v24, s2 |
| ; GCN-NEXT: s_add_u32 s2, s0, 64 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[24:25], v[20:23] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v21, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v20, s2 |
| ; GCN-NEXT: s_add_u32 s2, s0, 48 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[20:21], v[16:19] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v17, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v16, s2 |
| ; GCN-NEXT: s_add_u32 s2, s0, 32 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[16:17], v[12:15] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v13, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v12, s2 |
| ; GCN-NEXT: s_add_u32 s2, s0, 16 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[12:13], v[8:11] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v9, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v8, s2 |
| ; GCN-NEXT: flat_store_dwordx4 v[8:9], v[4:7] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s0 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: s_endpgm |
| ; Checks dynamic insertelement of double 1.0 into <16 x double>: the expected code |
| ; doubles the lane index (s_lshl_b32) to address 32-bit sub-registers, writes the low |
| ; dword 0 and high dword 0x3ff00000 (double 1.0) via m0-indexed v_movreld, and stores |
| ; the 32-dword result to %out as eight flat dwordx4 chunks. |
| entry: |
| %v = insertelement <16 x double> %vec, double 1.000000e+00, i32 %sel |
| store <16 x double> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @double15_inselt(ptr addrspace(1) %out, <15 x double> %vec, i32 %sel) { |
| ; GCN-LABEL: double15_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dwordx16 s[4:19], s[0:1], 0xa4 |
| ; GCN-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x114 |
| ; GCN-NEXT: s_load_dwordx4 s[20:23], s[0:1], 0x104 |
| ; GCN-NEXT: s_load_dwordx8 s[24:31], s[0:1], 0xe4 |
| ; GCN-NEXT: v_mov_b32_e32 v32, 0x3ff00000 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: v_mov_b32_e32 v0, s4 |
| ; GCN-NEXT: s_load_dword s4, s[0:1], 0x124 |
| ; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 |
| ; GCN-NEXT: v_mov_b32_e32 v28, s2 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s5 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s6 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_lshl_b32 s2, s4, 1 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s7 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s8 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s9 |
| ; GCN-NEXT: v_mov_b32_e32 v6, s10 |
| ; GCN-NEXT: v_mov_b32_e32 v7, s11 |
| ; GCN-NEXT: v_mov_b32_e32 v8, s12 |
| ; GCN-NEXT: v_mov_b32_e32 v9, s13 |
| ; GCN-NEXT: v_mov_b32_e32 v10, s14 |
| ; GCN-NEXT: v_mov_b32_e32 v11, s15 |
| ; GCN-NEXT: v_mov_b32_e32 v12, s16 |
| ; GCN-NEXT: v_mov_b32_e32 v13, s17 |
| ; GCN-NEXT: v_mov_b32_e32 v14, s18 |
| ; GCN-NEXT: v_mov_b32_e32 v15, s19 |
| ; GCN-NEXT: v_mov_b32_e32 v16, s24 |
| ; GCN-NEXT: v_mov_b32_e32 v17, s25 |
| ; GCN-NEXT: v_mov_b32_e32 v18, s26 |
| ; GCN-NEXT: v_mov_b32_e32 v19, s27 |
| ; GCN-NEXT: v_mov_b32_e32 v20, s28 |
| ; GCN-NEXT: v_mov_b32_e32 v21, s29 |
| ; GCN-NEXT: v_mov_b32_e32 v22, s30 |
| ; GCN-NEXT: v_mov_b32_e32 v23, s31 |
| ; GCN-NEXT: v_mov_b32_e32 v24, s20 |
| ; GCN-NEXT: v_mov_b32_e32 v25, s21 |
| ; GCN-NEXT: v_mov_b32_e32 v26, s22 |
| ; GCN-NEXT: v_mov_b32_e32 v27, s23 |
| ; GCN-NEXT: v_mov_b32_e32 v29, s3 |
| ; GCN-NEXT: s_mov_b32 m0, s2 |
| ; GCN-NEXT: v_movreld_b32_e32 v0, 0 |
| ; GCN-NEXT: s_add_u32 s2, s0, 0x50 |
| ; GCN-NEXT: v_movreld_b32_e32 v1, v32 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: v_mov_b32_e32 v31, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v30, s2 |
| ; GCN-NEXT: s_add_u32 s2, s0, 64 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[30:31], v[20:23] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v21, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v20, s2 |
| ; GCN-NEXT: s_add_u32 s2, s0, 48 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[20:21], v[16:19] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v17, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v16, s2 |
| ; GCN-NEXT: s_add_u32 s2, s0, 32 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[16:17], v[12:15] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v13, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v12, s2 |
| ; GCN-NEXT: s_add_u32 s2, s0, 16 |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: flat_store_dwordx4 v[12:13], v[8:11] |
| ; GCN-NEXT: s_nop 0 |
| ; GCN-NEXT: v_mov_b32_e32 v9, s3 |
| ; GCN-NEXT: v_mov_b32_e32 v8, s2 |
| ; GCN-NEXT: flat_store_dwordx4 v[8:9], v[4:7] |
| ; GCN-NEXT: s_add_u32 s2, s0, 0x70 |
| ; GCN-NEXT: v_mov_b32_e32 v5, s1 |
| ; GCN-NEXT: v_mov_b32_e32 v4, s0 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: s_addc_u32 s3, s1, 0 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s2 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s3 |
| ; GCN-NEXT: s_add_u32 s0, s0, 0x60 |
| ; GCN-NEXT: flat_store_dwordx2 v[0:1], v[28:29] |
| ; GCN-NEXT: s_addc_u32 s1, s1, 0 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s0 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s1 |
| ; GCN-NEXT: flat_store_dwordx4 v[0:1], v[24:27] |
| ; GCN-NEXT: s_endpgm |
| ; Checks dynamic insertelement of double 1.0 into an odd-sized <15 x double>: as in |
| ; the <16 x double> case the index is doubled and the 1.0 halves (0 / 0x3ff00000) are |
| ; written with m0-indexed v_movreld, but the 30-dword store is split into seven |
| ; flat dwordx4 stores plus one dwordx2 for the last element. |
| entry: |
| %v = insertelement <15 x double> %vec, double 1.000000e+00, i32 %sel |
| store <15 x double> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @bit4_inselt(ptr addrspace(1) %out, <4 x i1> %vec, i32 %sel) { |
| ; GCN-LABEL: bit4_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_mov_b32 s4, SCRATCH_RSRC_DWORD0 |
| ; GCN-NEXT: s_mov_b32 s5, SCRATCH_RSRC_DWORD1 |
| ; GCN-NEXT: s_mov_b32 s6, -1 |
| ; GCN-NEXT: s_mov_b32 s7, 0xe80000 |
| ; GCN-NEXT: s_add_u32 s4, s4, s3 |
| ; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 |
| ; GCN-NEXT: s_addc_u32 s5, s5, 0 |
| ; GCN-NEXT: v_mov_b32_e32 v0, 4 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_and_b32 s3, s3, 3 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s2 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v2, 1, s2 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v3, 2, s2 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v4, 3, s2 |
| ; GCN-NEXT: v_or_b32_e32 v0, s3, v0 |
| ; GCN-NEXT: v_and_b32_e32 v2, 1, v2 |
| ; GCN-NEXT: v_and_b32_e32 v3, 3, v3 |
| ; GCN-NEXT: v_and_b32_e32 v4, 1, v4 |
| ; GCN-NEXT: buffer_store_byte v1, off, s[4:7], 0 offset:4 |
| ; GCN-NEXT: buffer_store_byte v4, off, s[4:7], 0 offset:7 |
| ; GCN-NEXT: buffer_store_byte v3, off, s[4:7], 0 offset:6 |
| ; GCN-NEXT: buffer_store_byte v2, off, s[4:7], 0 offset:5 |
| ; GCN-NEXT: v_mov_b32_e32 v1, 1 |
| ; GCN-NEXT: buffer_store_byte v1, v0, s[4:7], 0 offen |
| ; GCN-NEXT: buffer_load_ubyte v0, off, s[4:7], 0 offset:4 |
| ; GCN-NEXT: buffer_load_ubyte v1, off, s[4:7], 0 offset:5 |
| ; GCN-NEXT: buffer_load_ubyte v2, off, s[4:7], 0 offset:6 |
| ; GCN-NEXT: buffer_load_ubyte v3, off, s[4:7], 0 offset:7 |
| ; GCN-NEXT: s_waitcnt vmcnt(3) |
| ; GCN-NEXT: v_and_b32_e32 v0, 1, v0 |
| ; GCN-NEXT: s_waitcnt vmcnt(2) |
| ; GCN-NEXT: v_and_b32_e32 v1, 1, v1 |
| ; GCN-NEXT: s_waitcnt vmcnt(1) |
| ; GCN-NEXT: v_and_b32_e32 v2, 1, v2 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v1, 1, v1 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v2, 2, v2 |
| ; GCN-NEXT: v_or_b32_e32 v0, v0, v1 |
| ; GCN-NEXT: s_waitcnt vmcnt(0) |
| ; GCN-NEXT: v_lshlrev_b16_e32 v3, 3, v3 |
| ; GCN-NEXT: v_or_b32_e32 v0, v0, v2 |
| ; GCN-NEXT: v_or_b32_e32 v0, v0, v3 |
| ; GCN-NEXT: v_and_b32_e32 v2, 15, v0 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s0 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s1 |
| ; GCN-NEXT: flat_store_byte v[0:1], v2 |
| ; GCN-NEXT: s_endpgm |
| ; Checks dynamic insertelement of i1 true into <4 x i1>: the expected code expands |
| ; the mask to one byte per element in scratch (offsets 4-7), overwrites the byte at |
| ; 4 + (%sel & 3) with 1 via an offen buffer store, then reloads the four bytes and |
| ; repacks them with shift/or into a 4-bit value stored as a single byte at %out. |
| entry: |
| %v = insertelement <4 x i1> %vec, i1 1, i32 %sel |
| store <4 x i1> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| define amdgpu_kernel void @bit128_inselt(ptr addrspace(1) %out, <128 x i1> %vec, i32 %sel) { |
| ; GCN-LABEL: bit128_inselt: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x34 |
| ; GCN-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 |
| ; GCN-NEXT: s_load_dword s0, s[0:1], 0x44 |
| ; GCN-NEXT: s_waitcnt lgkmcnt(0) |
| ; GCN-NEXT: s_lshr_b32 s1, s4, 24 |
| ; GCN-NEXT: s_lshr_b32 s8, s4, 16 |
| ; GCN-NEXT: s_lshr_b32 s9, s4, 17 |
| ; GCN-NEXT: s_lshr_b32 s10, s4, 18 |
| ; GCN-NEXT: s_lshr_b32 s11, s4, 19 |
| ; GCN-NEXT: s_lshr_b32 s12, s4, 20 |
| ; GCN-NEXT: s_lshr_b32 s13, s4, 21 |
| ; GCN-NEXT: s_lshr_b32 s14, s4, 22 |
| ; GCN-NEXT: s_lshr_b32 s15, s4, 23 |
| ; GCN-NEXT: s_lshr_b32 s16, s5, 24 |
| ; GCN-NEXT: s_lshr_b32 s17, s5, 16 |
| ; GCN-NEXT: s_lshr_b32 s18, s5, 17 |
| ; GCN-NEXT: s_lshr_b32 s19, s5, 18 |
| ; GCN-NEXT: s_lshr_b32 s20, s5, 19 |
| ; GCN-NEXT: s_lshr_b32 s21, s5, 20 |
| ; GCN-NEXT: s_lshr_b32 s22, s5, 21 |
| ; GCN-NEXT: s_lshr_b32 s23, s5, 22 |
| ; GCN-NEXT: s_lshr_b32 s24, s5, 23 |
| ; GCN-NEXT: s_lshr_b32 s25, s6, 24 |
| ; GCN-NEXT: s_lshr_b32 s26, s6, 16 |
| ; GCN-NEXT: s_lshr_b32 s27, s6, 17 |
| ; GCN-NEXT: s_lshr_b32 s28, s6, 18 |
| ; GCN-NEXT: s_lshr_b32 s29, s6, 19 |
| ; GCN-NEXT: s_lshr_b32 s30, s6, 20 |
| ; GCN-NEXT: s_lshr_b32 s31, s6, 21 |
| ; GCN-NEXT: s_lshr_b32 s33, s6, 22 |
| ; GCN-NEXT: s_lshr_b32 s34, s6, 23 |
| ; GCN-NEXT: s_lshr_b32 s35, s7, 24 |
| ; GCN-NEXT: s_lshr_b32 s36, s7, 16 |
| ; GCN-NEXT: s_lshr_b32 s37, s7, 17 |
| ; GCN-NEXT: s_lshr_b32 s38, s7, 18 |
| ; GCN-NEXT: s_lshr_b32 s39, s7, 19 |
| ; GCN-NEXT: s_lshr_b32 s40, s7, 20 |
| ; GCN-NEXT: s_lshr_b32 s41, s7, 21 |
| ; GCN-NEXT: s_lshr_b32 s42, s7, 22 |
| ; GCN-NEXT: s_lshr_b32 s43, s7, 23 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x77 |
| ; GCN-NEXT: v_mov_b32_e32 v15, s43 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x76 |
| ; GCN-NEXT: v_cndmask_b32_e32 v15, 1, v15, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v18, s42 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: v_and_b32_e32 v18, 1, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v15, 3, v15 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 2, v18 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x75 |
| ; GCN-NEXT: v_or_b32_e32 v15, v15, v18 |
| ; GCN-NEXT: v_mov_b32_e32 v18, s41 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x74 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v19, s40 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 1, v18 |
| ; GCN-NEXT: v_and_b32_e32 v19, 1, v19 |
| ; GCN-NEXT: v_or_b32_e32 v18, v19, v18 |
| ; GCN-NEXT: v_and_b32_e32 v18, 3, v18 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x73 |
| ; GCN-NEXT: v_or_b32_e32 v15, v18, v15 |
| ; GCN-NEXT: v_mov_b32_e32 v18, s39 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x72 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v19, s38 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_and_b32_e32 v19, 1, v19 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 3, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 2, v19 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x71 |
| ; GCN-NEXT: v_or_b32_e32 v18, v18, v19 |
| ; GCN-NEXT: v_mov_b32_e32 v19, s37 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x70 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v20, s36 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 1, v19 |
| ; GCN-NEXT: v_and_b32_e32 v20, 1, v20 |
| ; GCN-NEXT: v_or_b32_e32 v19, v20, v19 |
| ; GCN-NEXT: v_and_b32_e32 v19, 3, v19 |
| ; GCN-NEXT: v_or_b32_e32 v18, v19, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v15, 4, v15 |
| ; GCN-NEXT: v_and_b32_e32 v18, 15, v18 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x7f |
| ; GCN-NEXT: v_or_b32_e32 v15, v18, v15 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v18, 7, s35 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x7e |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 6, s35 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_and_b32_e32 v19, 1, v19 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 3, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 2, v19 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x7d |
| ; GCN-NEXT: v_or_b32_e32 v18, v18, v19 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 5, s35 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x7c |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 4, s35 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 1, v19 |
| ; GCN-NEXT: v_and_b32_e32 v20, 1, v20 |
| ; GCN-NEXT: v_or_b32_e32 v19, v20, v19 |
| ; GCN-NEXT: v_and_b32_e32 v19, 3, v19 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x7b |
| ; GCN-NEXT: v_or_b32_e32 v18, v19, v18 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 3, s35 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x7a |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 2, s35 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: v_and_b32_e32 v20, 1, v20 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x78 |
| ; GCN-NEXT: v_mov_b32_e32 v13, s35 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 3, v19 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v20, 2, v20 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x79 |
| ; GCN-NEXT: v_or_b32_e32 v19, v19, v20 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 1, s35 |
| ; GCN-NEXT: v_cndmask_b32_e32 v13, 1, v13, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: v_and_b32_e32 v13, 1, v13 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v20, 1, v20 |
| ; GCN-NEXT: v_or_b32_e32 v13, v13, v20 |
| ; GCN-NEXT: v_and_b32_e32 v13, 3, v13 |
| ; GCN-NEXT: v_or_b32_e32 v19, v13, v19 |
| ; GCN-NEXT: v_mov_b32_e32 v13, 15 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 12, v18 |
| ; GCN-NEXT: v_and_b32_sdwa v19, v19, v13 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GCN-NEXT: v_or_b32_e32 v18, v18, v19 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x6f |
| ; GCN-NEXT: v_or_b32_sdwa v15, v15, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: v_lshrrev_b16_e64 v18, 15, s7 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x6e |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 14, s7 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_and_b32_e32 v19, 1, v19 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 3, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 2, v19 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x6d |
| ; GCN-NEXT: v_or_b32_e32 v18, v18, v19 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 13, s7 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x6c |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 12, s7 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 1, v19 |
| ; GCN-NEXT: v_and_b32_e32 v20, 1, v20 |
| ; GCN-NEXT: v_or_b32_e32 v19, v20, v19 |
| ; GCN-NEXT: v_and_b32_e32 v19, 3, v19 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x6b |
| ; GCN-NEXT: v_or_b32_e32 v18, v19, v18 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 11, s7 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x6a |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 10, s7 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: v_and_b32_e32 v20, 1, v20 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 3, v19 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v20, 2, v20 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x69 |
| ; GCN-NEXT: v_or_b32_e32 v19, v19, v20 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 9, s7 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x68 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v17, 8, s7 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v20, 1, v20 |
| ; GCN-NEXT: v_and_b32_e32 v17, 1, v17 |
| ; GCN-NEXT: v_or_b32_e32 v17, v17, v20 |
| ; GCN-NEXT: v_and_b32_e32 v17, 3, v17 |
| ; GCN-NEXT: v_or_b32_e32 v17, v17, v19 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 12, v18 |
| ; GCN-NEXT: v_and_b32_sdwa v17, v17, v13 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x67 |
| ; GCN-NEXT: v_or_b32_e32 v17, v18, v17 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v18, 7, s7 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x66 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 6, s7 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_and_b32_e32 v19, 1, v19 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 3, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 2, v19 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x65 |
| ; GCN-NEXT: v_or_b32_e32 v18, v18, v19 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 5, s7 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x64 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 4, s7 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 1, v19 |
| ; GCN-NEXT: v_and_b32_e32 v20, 1, v20 |
| ; GCN-NEXT: v_or_b32_e32 v19, v20, v19 |
| ; GCN-NEXT: v_and_b32_e32 v19, 3, v19 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x63 |
| ; GCN-NEXT: v_or_b32_e32 v18, v19, v18 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 3, s7 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x62 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 2, s7 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: v_and_b32_e32 v20, 1, v20 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 3, v19 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v20, 2, v20 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x61 |
| ; GCN-NEXT: v_or_b32_e32 v19, v19, v20 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 1, s7 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x60 |
| ; GCN-NEXT: v_mov_b32_e32 v16, s7 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v16, 1, v16, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v20, 1, v20 |
| ; GCN-NEXT: v_and_b32_e32 v16, 1, v16 |
| ; GCN-NEXT: v_or_b32_e32 v16, v16, v20 |
| ; GCN-NEXT: v_and_b32_e32 v16, 3, v16 |
| ; GCN-NEXT: v_or_b32_e32 v16, v16, v19 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 4, v18 |
| ; GCN-NEXT: v_and_b32_e32 v16, 15, v16 |
| ; GCN-NEXT: v_or_b32_e32 v16, v16, v18 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x57 |
| ; GCN-NEXT: v_or_b32_sdwa v16, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v17, s34 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x56 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v18, s33 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: v_and_b32_e32 v18, 1, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 3, v17 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 2, v18 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x55 |
| ; GCN-NEXT: v_or_b32_e32 v17, v17, v18 |
| ; GCN-NEXT: v_mov_b32_e32 v18, s31 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x54 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v19, s30 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 1, v18 |
| ; GCN-NEXT: v_and_b32_e32 v19, 1, v19 |
| ; GCN-NEXT: v_or_b32_e32 v18, v19, v18 |
| ; GCN-NEXT: v_and_b32_e32 v18, 3, v18 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x53 |
| ; GCN-NEXT: v_or_b32_e32 v17, v18, v17 |
| ; GCN-NEXT: v_mov_b32_e32 v18, s29 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x52 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v19, s28 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_and_b32_e32 v19, 1, v19 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 3, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 2, v19 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x51 |
| ; GCN-NEXT: v_or_b32_e32 v18, v18, v19 |
| ; GCN-NEXT: v_mov_b32_e32 v19, s27 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x50 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v20, s26 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 1, v19 |
| ; GCN-NEXT: v_and_b32_e32 v20, 1, v20 |
| ; GCN-NEXT: v_or_b32_e32 v19, v20, v19 |
| ; GCN-NEXT: v_and_b32_e32 v19, 3, v19 |
| ; GCN-NEXT: v_or_b32_e32 v18, v19, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 4, v17 |
| ; GCN-NEXT: v_and_b32_e32 v18, 15, v18 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x5f |
| ; GCN-NEXT: v_or_b32_e32 v17, v18, v17 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v18, 7, s25 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x5e |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 6, s25 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_and_b32_e32 v19, 1, v19 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 3, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 2, v19 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x5d |
| ; GCN-NEXT: v_or_b32_e32 v18, v18, v19 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 5, s25 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x5c |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 4, s25 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 1, v19 |
| ; GCN-NEXT: v_and_b32_e32 v20, 1, v20 |
| ; GCN-NEXT: v_or_b32_e32 v19, v20, v19 |
| ; GCN-NEXT: v_and_b32_e32 v19, 3, v19 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x5b |
| ; GCN-NEXT: v_or_b32_e32 v18, v19, v18 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 3, s25 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x5a |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 2, s25 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: v_and_b32_e32 v20, 1, v20 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x58 |
| ; GCN-NEXT: v_mov_b32_e32 v3, s25 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 3, v19 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v20, 2, v20 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x59 |
| ; GCN-NEXT: v_or_b32_e32 v19, v19, v20 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 1, s25 |
| ; GCN-NEXT: v_cndmask_b32_e32 v3, 1, v3, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: v_and_b32_e32 v3, 1, v3 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v20, 1, v20 |
| ; GCN-NEXT: v_or_b32_e32 v3, v3, v20 |
| ; GCN-NEXT: v_and_b32_e32 v3, 3, v3 |
| ; GCN-NEXT: v_or_b32_e32 v3, v3, v19 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 12, v18 |
| ; GCN-NEXT: v_and_b32_sdwa v3, v3, v13 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GCN-NEXT: v_or_b32_e32 v3, v18, v3 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x4f |
| ; GCN-NEXT: v_or_b32_sdwa v17, v17, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: v_lshrrev_b16_e64 v3, 15, s6 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x4e |
| ; GCN-NEXT: v_lshrrev_b16_e64 v18, 14, s6 |
| ; GCN-NEXT: v_cndmask_b32_e32 v3, 1, v3, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: v_and_b32_e32 v18, 1, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v3, 3, v3 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 2, v18 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x4d |
| ; GCN-NEXT: v_or_b32_e32 v3, v3, v18 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v18, 13, s6 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x4c |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 12, s6 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 1, v18 |
| ; GCN-NEXT: v_and_b32_e32 v19, 1, v19 |
| ; GCN-NEXT: v_or_b32_e32 v18, v19, v18 |
| ; GCN-NEXT: v_and_b32_e32 v18, 3, v18 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x4b |
| ; GCN-NEXT: v_or_b32_e32 v3, v18, v3 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v18, 11, s6 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x4a |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 10, s6 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_and_b32_e32 v19, 1, v19 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 3, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 2, v19 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x49 |
| ; GCN-NEXT: v_or_b32_e32 v18, v18, v19 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 9, s6 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x48 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 8, s6 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 1, v19 |
| ; GCN-NEXT: v_and_b32_e32 v20, 1, v20 |
| ; GCN-NEXT: v_or_b32_e32 v19, v20, v19 |
| ; GCN-NEXT: v_and_b32_e32 v19, 3, v19 |
| ; GCN-NEXT: v_or_b32_e32 v18, v19, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v3, 12, v3 |
| ; GCN-NEXT: v_and_b32_sdwa v18, v18, v13 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x47 |
| ; GCN-NEXT: v_or_b32_e32 v18, v3, v18 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v3, 7, s6 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x46 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 6, s6 |
| ; GCN-NEXT: v_cndmask_b32_e32 v3, 1, v3, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_and_b32_e32 v19, 1, v19 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v3, 3, v3 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 2, v19 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x45 |
| ; GCN-NEXT: v_or_b32_e32 v3, v3, v19 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 5, s6 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x44 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 4, s6 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 1, v19 |
| ; GCN-NEXT: v_and_b32_e32 v20, 1, v20 |
| ; GCN-NEXT: v_or_b32_e32 v19, v20, v19 |
| ; GCN-NEXT: v_and_b32_e32 v19, 3, v19 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x43 |
| ; GCN-NEXT: v_or_b32_e32 v19, v19, v3 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v3, 3, s6 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x42 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 2, s6 |
| ; GCN-NEXT: v_cndmask_b32_e32 v3, 1, v3, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: v_and_b32_e32 v20, 1, v20 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v3, 3, v3 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v20, 2, v20 |
| ; GCN-NEXT: s_cmpk_lg_i32 s0, 0x41 |
| ; GCN-NEXT: v_or_b32_e32 v3, v3, v20 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v20, 1, s6 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 64 |
| ; GCN-NEXT: v_mov_b32_e32 v2, s6 |
| ; GCN-NEXT: v_cndmask_b32_e32 v20, 1, v20, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v2, 1, v2, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v20, 1, v20 |
| ; GCN-NEXT: v_and_b32_e32 v2, 1, v2 |
| ; GCN-NEXT: v_or_b32_e32 v2, v2, v20 |
| ; GCN-NEXT: v_and_b32_e32 v2, 3, v2 |
| ; GCN-NEXT: v_or_b32_e32 v2, v2, v3 |
| ; GCN-NEXT: v_or_b32_sdwa v3, v16, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD |
| ; GCN-NEXT: v_lshlrev_b16_e32 v15, 4, v19 |
| ; GCN-NEXT: v_and_b32_e32 v2, 15, v2 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 55 |
| ; GCN-NEXT: v_or_b32_e32 v2, v2, v15 |
| ; GCN-NEXT: v_mov_b32_e32 v15, s24 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 54 |
| ; GCN-NEXT: v_cndmask_b32_e32 v15, 1, v15, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v16, s23 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v16, 1, v16, vcc |
| ; GCN-NEXT: v_and_b32_e32 v16, 1, v16 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v15, 3, v15 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v16, 2, v16 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 53 |
| ; GCN-NEXT: v_or_b32_sdwa v2, v2, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: v_or_b32_e32 v15, v15, v16 |
| ; GCN-NEXT: v_mov_b32_e32 v16, s22 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 52 |
| ; GCN-NEXT: v_or_b32_sdwa v2, v2, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD |
| ; GCN-NEXT: v_cndmask_b32_e32 v16, 1, v16, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v17, s21 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v16, 1, v16 |
| ; GCN-NEXT: v_and_b32_e32 v17, 1, v17 |
| ; GCN-NEXT: v_or_b32_e32 v16, v17, v16 |
| ; GCN-NEXT: v_and_b32_e32 v16, 3, v16 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 51 |
| ; GCN-NEXT: v_or_b32_e32 v15, v16, v15 |
| ; GCN-NEXT: v_mov_b32_e32 v16, s20 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 50 |
| ; GCN-NEXT: v_cndmask_b32_e32 v16, 1, v16, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v17, s19 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: v_and_b32_e32 v17, 1, v17 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v16, 3, v16 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 2, v17 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 49 |
| ; GCN-NEXT: v_or_b32_e32 v16, v16, v17 |
| ; GCN-NEXT: v_mov_b32_e32 v17, s18 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 48 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v18, s17 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 1, v17 |
| ; GCN-NEXT: v_and_b32_e32 v18, 1, v18 |
| ; GCN-NEXT: v_or_b32_e32 v17, v18, v17 |
| ; GCN-NEXT: v_and_b32_e32 v17, 3, v17 |
| ; GCN-NEXT: v_or_b32_e32 v16, v17, v16 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v15, 4, v15 |
| ; GCN-NEXT: v_and_b32_e32 v16, 15, v16 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 63 |
| ; GCN-NEXT: v_or_b32_e32 v15, v16, v15 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v16, 7, s16 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 62 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v17, 6, s16 |
| ; GCN-NEXT: v_cndmask_b32_e32 v16, 1, v16, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: v_and_b32_e32 v17, 1, v17 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v16, 3, v16 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 2, v17 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 61 |
| ; GCN-NEXT: v_or_b32_e32 v16, v16, v17 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v17, 5, s16 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 60 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v18, 4, s16 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 1, v17 |
| ; GCN-NEXT: v_and_b32_e32 v18, 1, v18 |
| ; GCN-NEXT: v_or_b32_e32 v17, v18, v17 |
| ; GCN-NEXT: v_and_b32_e32 v17, 3, v17 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 59 |
| ; GCN-NEXT: v_or_b32_e32 v16, v17, v16 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v17, 3, s16 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 58 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v18, 2, s16 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: v_and_b32_e32 v18, 1, v18 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 56 |
| ; GCN-NEXT: v_mov_b32_e32 v14, s16 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 3, v17 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 2, v18 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 57 |
| ; GCN-NEXT: v_or_b32_e32 v17, v17, v18 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v18, 1, s16 |
| ; GCN-NEXT: v_cndmask_b32_e32 v14, 1, v14, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: v_and_b32_e32 v14, 1, v14 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 1, v18 |
| ; GCN-NEXT: v_or_b32_e32 v14, v14, v18 |
| ; GCN-NEXT: v_and_b32_e32 v14, 3, v14 |
| ; GCN-NEXT: v_or_b32_e32 v14, v14, v17 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v16, 12, v16 |
| ; GCN-NEXT: v_and_b32_sdwa v14, v14, v13 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GCN-NEXT: v_or_b32_e32 v14, v16, v14 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 47 |
| ; GCN-NEXT: v_or_b32_sdwa v15, v15, v14 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: v_lshrrev_b16_e64 v14, 15, s5 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 46 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v16, 14, s5 |
| ; GCN-NEXT: v_cndmask_b32_e32 v14, 1, v14, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v16, 1, v16, vcc |
| ; GCN-NEXT: v_and_b32_e32 v16, 1, v16 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v14, 3, v14 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v16, 2, v16 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 45 |
| ; GCN-NEXT: v_or_b32_e32 v14, v14, v16 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v16, 13, s5 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 44 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v17, 12, s5 |
| ; GCN-NEXT: v_cndmask_b32_e32 v16, 1, v16, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v16, 1, v16 |
| ; GCN-NEXT: v_and_b32_e32 v17, 1, v17 |
| ; GCN-NEXT: v_or_b32_e32 v16, v17, v16 |
| ; GCN-NEXT: v_and_b32_e32 v16, 3, v16 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 43 |
| ; GCN-NEXT: v_or_b32_e32 v14, v16, v14 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v16, 11, s5 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 42 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v17, 10, s5 |
| ; GCN-NEXT: v_cndmask_b32_e32 v16, 1, v16, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: v_and_b32_e32 v17, 1, v17 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v16, 3, v16 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 2, v17 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 41 |
| ; GCN-NEXT: v_or_b32_e32 v16, v16, v17 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v17, 9, s5 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 40 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v18, 8, s5 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 1, v17 |
| ; GCN-NEXT: v_and_b32_e32 v18, 1, v18 |
| ; GCN-NEXT: v_or_b32_e32 v17, v18, v17 |
| ; GCN-NEXT: v_and_b32_e32 v17, 3, v17 |
| ; GCN-NEXT: v_or_b32_e32 v16, v17, v16 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v14, 12, v14 |
| ; GCN-NEXT: v_and_b32_sdwa v16, v16, v13 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 39 |
| ; GCN-NEXT: v_or_b32_e32 v16, v14, v16 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v14, 7, s5 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 38 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v17, 6, s5 |
| ; GCN-NEXT: v_cndmask_b32_e32 v14, 1, v14, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: v_and_b32_e32 v17, 1, v17 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v14, 3, v14 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 2, v17 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 37 |
| ; GCN-NEXT: v_or_b32_e32 v14, v14, v17 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v17, 5, s5 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 36 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v18, 4, s5 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 1, v17 |
| ; GCN-NEXT: v_and_b32_e32 v18, 1, v18 |
| ; GCN-NEXT: v_or_b32_e32 v17, v18, v17 |
| ; GCN-NEXT: v_and_b32_e32 v17, 3, v17 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 35 |
| ; GCN-NEXT: v_or_b32_e32 v17, v17, v14 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v14, 3, s5 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 34 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v18, 2, s5 |
| ; GCN-NEXT: v_cndmask_b32_e32 v14, 1, v14, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: v_and_b32_e32 v18, 1, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v14, 3, v14 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v18, 2, v18 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 33 |
| ; GCN-NEXT: v_or_b32_e32 v18, v14, v18 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v14, 1, s5 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 32 |
| ; GCN-NEXT: v_mov_b32_e32 v1, s5 |
| ; GCN-NEXT: v_cndmask_b32_e32 v14, 1, v14, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v1, 1, v1, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v14, 1, v14 |
| ; GCN-NEXT: v_and_b32_e32 v1, 1, v1 |
| ; GCN-NEXT: v_or_b32_e32 v1, v1, v14 |
| ; GCN-NEXT: v_and_b32_e32 v1, 3, v1 |
| ; GCN-NEXT: v_or_b32_e32 v1, v1, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 4, v17 |
| ; GCN-NEXT: v_and_b32_e32 v1, 15, v1 |
| ; GCN-NEXT: v_or_b32_e32 v1, v1, v17 |
| ; GCN-NEXT: v_or_b32_sdwa v1, v1, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 23 |
| ; GCN-NEXT: v_or_b32_sdwa v1, v1, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v15, s15 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 22 |
| ; GCN-NEXT: v_cndmask_b32_e32 v15, 1, v15, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v16, s14 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v16, 1, v16, vcc |
| ; GCN-NEXT: v_and_b32_e32 v16, 1, v16 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v15, 3, v15 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v16, 2, v16 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 21 |
| ; GCN-NEXT: v_or_b32_e32 v15, v15, v16 |
| ; GCN-NEXT: v_mov_b32_e32 v16, s13 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 20 |
| ; GCN-NEXT: v_cndmask_b32_e32 v16, 1, v16, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v17, s12 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v16, 1, v16 |
| ; GCN-NEXT: v_and_b32_e32 v17, 1, v17 |
| ; GCN-NEXT: v_or_b32_e32 v16, v17, v16 |
| ; GCN-NEXT: v_and_b32_e32 v16, 3, v16 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 19 |
| ; GCN-NEXT: v_or_b32_e32 v15, v16, v15 |
| ; GCN-NEXT: v_mov_b32_e32 v16, s11 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 18 |
| ; GCN-NEXT: v_cndmask_b32_e32 v16, 1, v16, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v17, s10 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: v_and_b32_e32 v17, 1, v17 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v16, 3, v16 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 2, v17 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 17 |
| ; GCN-NEXT: v_or_b32_e32 v16, v16, v17 |
| ; GCN-NEXT: v_mov_b32_e32 v17, s9 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 16 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: v_mov_b32_e32 v19, s8 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 1, v17 |
| ; GCN-NEXT: v_and_b32_e32 v19, 1, v19 |
| ; GCN-NEXT: v_or_b32_e32 v17, v19, v17 |
| ; GCN-NEXT: v_and_b32_e32 v17, 3, v17 |
| ; GCN-NEXT: v_or_b32_e32 v16, v17, v16 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v15, 4, v15 |
| ; GCN-NEXT: v_and_b32_e32 v16, 15, v16 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 31 |
| ; GCN-NEXT: v_or_b32_e32 v15, v16, v15 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v16, 7, s1 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 30 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v17, 6, s1 |
| ; GCN-NEXT: v_cndmask_b32_e32 v16, 1, v16, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: v_and_b32_e32 v17, 1, v17 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v16, 3, v16 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 2, v17 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 29 |
| ; GCN-NEXT: v_or_b32_e32 v16, v16, v17 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v17, 5, s1 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 28 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 4, s1 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 1, v17 |
| ; GCN-NEXT: v_and_b32_e32 v19, 1, v19 |
| ; GCN-NEXT: v_or_b32_e32 v17, v19, v17 |
| ; GCN-NEXT: v_and_b32_e32 v17, 3, v17 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 27 |
| ; GCN-NEXT: v_or_b32_e32 v16, v17, v16 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v17, 3, s1 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 26 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 2, s1 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_and_b32_e32 v19, 1, v19 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 24 |
| ; GCN-NEXT: v_mov_b32_e32 v18, s1 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 3, v17 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 2, v19 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 25 |
| ; GCN-NEXT: v_or_b32_e32 v17, v17, v19 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 1, s1 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v19, 1, v19, vcc |
| ; GCN-NEXT: v_and_b32_e32 v18, 1, v18 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v19, 1, v19 |
| ; GCN-NEXT: v_or_b32_e32 v18, v18, v19 |
| ; GCN-NEXT: v_and_b32_e32 v18, 3, v18 |
| ; GCN-NEXT: v_or_b32_e32 v17, v18, v17 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v16, 12, v16 |
| ; GCN-NEXT: v_and_b32_sdwa v17, v17, v13 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GCN-NEXT: v_or_b32_e32 v16, v16, v17 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 15 |
| ; GCN-NEXT: v_or_b32_sdwa v15, v15, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: v_lshrrev_b16_e64 v16, 15, s4 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 14 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v17, 14, s4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v16, 1, v16, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: v_and_b32_e32 v17, 1, v17 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v16, 3, v16 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 2, v17 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 13 |
| ; GCN-NEXT: v_or_b32_e32 v16, v16, v17 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v17, 13, s4 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 12 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v18, 12, s4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v17, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v18, 1, v18, vcc |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 1, v17 |
| ; GCN-NEXT: v_and_b32_e32 v18, 1, v18 |
| ; GCN-NEXT: v_or_b32_e32 v17, v18, v17 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 11 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v19, 11, s4 |
| ; GCN-NEXT: v_and_b32_e32 v17, 3, v17 |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 10 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v14, 10, s4 |
| ; GCN-NEXT: v_or_b32_e32 v16, v17, v16 |
| ; GCN-NEXT: v_cndmask_b32_e32 v17, 1, v19, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 9 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v12, 9, s4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v14, 1, v14, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 8 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v11, 8, s4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v12, 1, v12, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 7 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v10, 7, s4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v11, 1, v11, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 6 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v9, 6, s4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v10, 1, v10, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 5 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v8, 5, s4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v9, 1, v9, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 4 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v7, 4, s4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v8, 1, v8, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 3 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v6, 3, s4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v7, 1, v7, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 2 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v5, 2, s4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v6, 1, v6, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 1 |
| ; GCN-NEXT: v_lshrrev_b16_e64 v4, 1, s4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v5, 1, v5, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: s_cmp_lg_u32 s0, 0 |
| ; GCN-NEXT: v_mov_b32_e32 v0, s4 |
| ; GCN-NEXT: v_cndmask_b32_e32 v4, 1, v4, vcc |
| ; GCN-NEXT: s_cselect_b64 vcc, -1, 0 |
| ; GCN-NEXT: v_cndmask_b32_e32 v0, 1, v0, vcc |
| ; GCN-NEXT: v_and_b32_e32 v14, 1, v14 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v12, 1, v12 |
| ; GCN-NEXT: v_and_b32_e32 v11, 1, v11 |
| ; GCN-NEXT: v_and_b32_e32 v9, 1, v9 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v8, 1, v8 |
| ; GCN-NEXT: v_and_b32_e32 v7, 1, v7 |
| ; GCN-NEXT: v_and_b32_e32 v5, 1, v5 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v4, 1, v4 |
| ; GCN-NEXT: v_and_b32_e32 v0, 1, v0 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v17, 3, v17 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v14, 2, v14 |
| ; GCN-NEXT: v_or_b32_e32 v11, v11, v12 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v10, 3, v10 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v9, 2, v9 |
| ; GCN-NEXT: v_or_b32_e32 v7, v7, v8 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v6, 3, v6 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v5, 2, v5 |
| ; GCN-NEXT: v_or_b32_e32 v0, v0, v4 |
| ; GCN-NEXT: v_or_b32_e32 v14, v17, v14 |
| ; GCN-NEXT: v_and_b32_e32 v11, 3, v11 |
| ; GCN-NEXT: v_or_b32_e32 v9, v10, v9 |
| ; GCN-NEXT: v_and_b32_e32 v7, 3, v7 |
| ; GCN-NEXT: v_or_b32_e32 v5, v6, v5 |
| ; GCN-NEXT: v_and_b32_e32 v0, 3, v0 |
| ; GCN-NEXT: v_or_b32_e32 v11, v11, v14 |
| ; GCN-NEXT: v_or_b32_e32 v7, v7, v9 |
| ; GCN-NEXT: v_or_b32_e32 v0, v0, v5 |
| ; GCN-NEXT: v_lshlrev_b16_e32 v16, 12, v16 |
| ; GCN-NEXT: v_and_b32_sdwa v11, v11, v13 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD |
| ; GCN-NEXT: v_lshlrev_b16_e32 v7, 4, v7 |
| ; GCN-NEXT: v_and_b32_e32 v0, 15, v0 |
| ; GCN-NEXT: v_or_b32_e32 v11, v16, v11 |
| ; GCN-NEXT: v_or_b32_e32 v0, v0, v7 |
| ; GCN-NEXT: v_or_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v5, s3 |
| ; GCN-NEXT: v_or_b32_sdwa v0, v0, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD |
| ; GCN-NEXT: v_mov_b32_e32 v4, s2 |
| ; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] |
| ; GCN-NEXT: s_endpgm |
| entry: |
| %v = insertelement <128 x i1> %vec, i1 1, i32 %sel |
| store <128 x i1> %v, ptr addrspace(1) %out |
| ret void |
| } |
| |
| ; Variable-index insertelement of 1.0 into a <32 x float> held entirely in VGPRs |
| ; (amdgpu_ps calling convention, value returned in v0-v31, index in v32). |
| ; With a divergent index the backend lowers this to one compare/select pair per |
| ; lane: a v_cmp_ne_u32 against each constant lane number followed by a |
| ; v_cndmask that keeps the original element when the index differs and writes |
| ; the immediate 1.0 when it matches. Note lane 0's compare is issued last |
| ; (s[62:63]) because vcc is consumed first by the lane-1 compare. |
| define amdgpu_ps <32 x float> @float32_inselt_vec(<32 x float> %vec, i32 %sel) { |
| ; GCN-LABEL: float32_inselt_vec: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 1, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 2, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[2:3], 3, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[4:5], 4, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[6:7], 5, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[8:9], 6, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[10:11], 7, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[12:13], 8, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[14:15], 9, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[16:17], 10, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[18:19], 11, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[20:21], 12, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[22:23], 13, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[24:25], 14, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[26:27], 15, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[28:29], 16, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[30:31], 17, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[34:35], 18, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[36:37], 19, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[38:39], 20, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[40:41], 21, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[42:43], 22, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[44:45], 23, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[46:47], 24, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[48:49], 25, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[50:51], 26, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[52:53], 27, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[54:55], 28, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[56:57], 29, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[58:59], 30, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[60:61], 31, v32 |
| ; GCN-NEXT: v_cmp_ne_u32_e64 s[62:63], 0, v32 |
| ; GCN-NEXT: v_cndmask_b32_e64 v0, 1.0, v0, s[62:63] |
| ; GCN-NEXT: v_cndmask_b32_e32 v1, 1.0, v1, vcc |
| ; GCN-NEXT: v_cndmask_b32_e64 v2, 1.0, v2, s[0:1] |
| ; GCN-NEXT: v_cndmask_b32_e64 v3, 1.0, v3, s[2:3] |
| ; GCN-NEXT: v_cndmask_b32_e64 v4, 1.0, v4, s[4:5] |
| ; GCN-NEXT: v_cndmask_b32_e64 v5, 1.0, v5, s[6:7] |
| ; GCN-NEXT: v_cndmask_b32_e64 v6, 1.0, v6, s[8:9] |
| ; GCN-NEXT: v_cndmask_b32_e64 v7, 1.0, v7, s[10:11] |
| ; GCN-NEXT: v_cndmask_b32_e64 v8, 1.0, v8, s[12:13] |
| ; GCN-NEXT: v_cndmask_b32_e64 v9, 1.0, v9, s[14:15] |
| ; GCN-NEXT: v_cndmask_b32_e64 v10, 1.0, v10, s[16:17] |
| ; GCN-NEXT: v_cndmask_b32_e64 v11, 1.0, v11, s[18:19] |
| ; GCN-NEXT: v_cndmask_b32_e64 v12, 1.0, v12, s[20:21] |
| ; GCN-NEXT: v_cndmask_b32_e64 v13, 1.0, v13, s[22:23] |
| ; GCN-NEXT: v_cndmask_b32_e64 v14, 1.0, v14, s[24:25] |
| ; GCN-NEXT: v_cndmask_b32_e64 v15, 1.0, v15, s[26:27] |
| ; GCN-NEXT: v_cndmask_b32_e64 v16, 1.0, v16, s[28:29] |
| ; GCN-NEXT: v_cndmask_b32_e64 v17, 1.0, v17, s[30:31] |
| ; GCN-NEXT: v_cndmask_b32_e64 v18, 1.0, v18, s[34:35] |
| ; GCN-NEXT: v_cndmask_b32_e64 v19, 1.0, v19, s[36:37] |
| ; GCN-NEXT: v_cndmask_b32_e64 v20, 1.0, v20, s[38:39] |
| ; GCN-NEXT: v_cndmask_b32_e64 v21, 1.0, v21, s[40:41] |
| ; GCN-NEXT: v_cndmask_b32_e64 v22, 1.0, v22, s[42:43] |
| ; GCN-NEXT: v_cndmask_b32_e64 v23, 1.0, v23, s[44:45] |
| ; GCN-NEXT: v_cndmask_b32_e64 v24, 1.0, v24, s[46:47] |
| ; GCN-NEXT: v_cndmask_b32_e64 v25, 1.0, v25, s[48:49] |
| ; GCN-NEXT: v_cndmask_b32_e64 v26, 1.0, v26, s[50:51] |
| ; GCN-NEXT: v_cndmask_b32_e64 v27, 1.0, v27, s[52:53] |
| ; GCN-NEXT: v_cndmask_b32_e64 v28, 1.0, v28, s[54:55] |
| ; GCN-NEXT: v_cndmask_b32_e64 v29, 1.0, v29, s[56:57] |
| ; GCN-NEXT: v_cndmask_b32_e64 v30, 1.0, v30, s[58:59] |
| ; GCN-NEXT: v_cndmask_b32_e64 v31, 1.0, v31, s[60:61] |
| ; GCN-NEXT: ; return to shader part epilog |
| entry: |
| %v = insertelement <32 x float> %vec, float 1.000000e+00, i32 %sel |
| ret <32 x float> %v |
| } |
| |
| ; Variable-index insertelement of 1.0 into a <8 x double> passed in VGPRs |
| ; (default calling convention, index in v16, result returned in v0-v15). |
| ; Each double occupies a VGPR pair, so the expansion selects per element with |
| ; two v_cndmask instructions: the low dword of double 1.0 is 0x00000000 |
| ; (folded as the inline constant 0) and the high dword is 0x3ff00000 |
| ; (materialized once in v17 and reused for all eight elements). Here the |
| ; compares are v_cmp_eq and the cndmask operands are swapped relative to the |
| ; v_cmp_ne form used by the float tests above. |
| define <8 x double> @double8_inselt_vec(<8 x double> %vec, i32 %sel) { |
| ; GCN-LABEL: double8_inselt_vec: |
| ; GCN: ; %bb.0: ; %entry |
| ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v16 |
| ; GCN-NEXT: v_mov_b32_e32 v17, 0x3ff00000 |
| ; GCN-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc |
| ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc |
| ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v16 |
| ; GCN-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc |
| ; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc |
| ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 2, v16 |
| ; GCN-NEXT: v_cndmask_b32_e64 v4, v4, 0, vcc |
| ; GCN-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc |
| ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 3, v16 |
| ; GCN-NEXT: v_cndmask_b32_e64 v6, v6, 0, vcc |
| ; GCN-NEXT: v_cndmask_b32_e32 v7, v7, v17, vcc |
| ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 4, v16 |
| ; GCN-NEXT: v_cndmask_b32_e64 v8, v8, 0, vcc |
| ; GCN-NEXT: v_cndmask_b32_e32 v9, v9, v17, vcc |
| ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 5, v16 |
| ; GCN-NEXT: v_cndmask_b32_e64 v10, v10, 0, vcc |
| ; GCN-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc |
| ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 6, v16 |
| ; GCN-NEXT: v_cndmask_b32_e64 v12, v12, 0, vcc |
| ; GCN-NEXT: v_cndmask_b32_e32 v13, v13, v17, vcc |
| ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 7, v16 |
| ; GCN-NEXT: v_cndmask_b32_e64 v14, v14, 0, vcc |
| ; GCN-NEXT: v_cndmask_b32_e32 v15, v15, v17, vcc |
| ; GCN-NEXT: s_setpc_b64 s[30:31] |
| entry: |
| %v = insertelement <8 x double> %vec, double 1.000000e+00, i32 %sel |
| ret <8 x double> %v |
| } |