| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GPRIDX %s |
| ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=MOVREL %s |
| ; RUN: not --crash llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -verify-machineinstrs -o /dev/null %s 2>&1 | FileCheck -check-prefix=ERR %s |
| |
| ; FIXME: Need constant bus fixup pre-gfx10 for movrel |
| ; ERR: Bad machine code: VOP* instruction violates constant bus restriction |
| |
; Dynamic insertelement with all-uniform (SGPR/inreg) operands: vector, value,
; and index are scalars. Both gfx900 (GPRIDX) and gfx1010 (MOVREL) lower this
; to an unrolled s_cmp_eq_u32 / s_cselect_b32 chain that picks %val for the
; lane whose position equals %idx, entirely on the scalar unit.
; NOTE: CHECK lines are autogenerated; hand-written comments here will be
; dropped on regeneration with update_llc_test_checks.py.
define amdgpu_ps <8 x i32> @dyn_insertelement_v8i32_s_s_s(<8 x i32> inreg %vec, i32 inreg %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8i32_s_s_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 0
; GPRIDX-NEXT: s_cselect_b32 s0, s10, s2
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 1
; GPRIDX-NEXT: s_cselect_b32 s1, s10, s3
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 2
; GPRIDX-NEXT: s_cselect_b32 s2, s10, s4
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 3
; GPRIDX-NEXT: s_cselect_b32 s3, s10, s5
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 4
; GPRIDX-NEXT: s_cselect_b32 s4, s10, s6
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 5
; GPRIDX-NEXT: s_cselect_b32 s5, s10, s7
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 6
; GPRIDX-NEXT: s_cselect_b32 s6, s10, s8
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 7
; GPRIDX-NEXT: s_cselect_b32 s7, s10, s9
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v8i32_s_s_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_cmp_eq_u32 s11, 0
; MOVREL-NEXT: s_cselect_b32 s0, s10, s2
; MOVREL-NEXT: s_cmp_eq_u32 s11, 1
; MOVREL-NEXT: s_cselect_b32 s1, s10, s3
; MOVREL-NEXT: s_cmp_eq_u32 s11, 2
; MOVREL-NEXT: s_cselect_b32 s2, s10, s4
; MOVREL-NEXT: s_cmp_eq_u32 s11, 3
; MOVREL-NEXT: s_cselect_b32 s3, s10, s5
; MOVREL-NEXT: s_cmp_eq_u32 s11, 4
; MOVREL-NEXT: s_cselect_b32 s4, s10, s6
; MOVREL-NEXT: s_cmp_eq_u32 s11, 5
; MOVREL-NEXT: s_cselect_b32 s5, s10, s7
; MOVREL-NEXT: s_cmp_eq_u32 s11, 6
; MOVREL-NEXT: s_cselect_b32 s6, s10, s8
; MOVREL-NEXT: s_cmp_eq_u32 s11, 7
; MOVREL-NEXT: s_cselect_b32 s7, s10, s9
; MOVREL-NEXT: ; return to shader part epilog
entry:
%insert = insertelement <8 x i32> %vec, i32 %val, i32 %idx
ret <8 x i32> %insert
}
| |
; Same all-SGPR pattern as the v8i32 test above, but the element type is a
; 32-bit addrspace(3) (LDS) pointer. Codegen is identical to the i32 case:
; an unrolled scalar s_cmp_eq_u32 / s_cselect_b32 chain on both subtargets,
; confirming pointer elements take the same path as plain i32 elements.
define amdgpu_ps <8 x i8 addrspace(3)*> @dyn_insertelement_v8p3i8_s_s_s(<8 x i8 addrspace(3)*> inreg %vec, i8 addrspace(3)* inreg %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8p3i8_s_s_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 0
; GPRIDX-NEXT: s_cselect_b32 s0, s10, s2
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 1
; GPRIDX-NEXT: s_cselect_b32 s1, s10, s3
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 2
; GPRIDX-NEXT: s_cselect_b32 s2, s10, s4
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 3
; GPRIDX-NEXT: s_cselect_b32 s3, s10, s5
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 4
; GPRIDX-NEXT: s_cselect_b32 s4, s10, s6
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 5
; GPRIDX-NEXT: s_cselect_b32 s5, s10, s7
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 6
; GPRIDX-NEXT: s_cselect_b32 s6, s10, s8
; GPRIDX-NEXT: s_cmp_eq_u32 s11, 7
; GPRIDX-NEXT: s_cselect_b32 s7, s10, s9
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v8p3i8_s_s_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_cmp_eq_u32 s11, 0
; MOVREL-NEXT: s_cselect_b32 s0, s10, s2
; MOVREL-NEXT: s_cmp_eq_u32 s11, 1
; MOVREL-NEXT: s_cselect_b32 s1, s10, s3
; MOVREL-NEXT: s_cmp_eq_u32 s11, 2
; MOVREL-NEXT: s_cselect_b32 s2, s10, s4
; MOVREL-NEXT: s_cmp_eq_u32 s11, 3
; MOVREL-NEXT: s_cselect_b32 s3, s10, s5
; MOVREL-NEXT: s_cmp_eq_u32 s11, 4
; MOVREL-NEXT: s_cselect_b32 s4, s10, s6
; MOVREL-NEXT: s_cmp_eq_u32 s11, 5
; MOVREL-NEXT: s_cselect_b32 s5, s10, s7
; MOVREL-NEXT: s_cmp_eq_u32 s11, 6
; MOVREL-NEXT: s_cselect_b32 s6, s10, s8
; MOVREL-NEXT: s_cmp_eq_u32 s11, 7
; MOVREL-NEXT: s_cselect_b32 s7, s10, s9
; MOVREL-NEXT: ; return to shader part epilog
entry:
%insert = insertelement <8 x i8 addrspace(3)*> %vec, i8 addrspace(3)* %val, i32 %idx
ret <8 x i8 addrspace(3)*> %insert
}
| |
; Insert a VGPR value at a VGPR (divergent) index into a constant <8 x float>
; vector <1.0 .. 8.0>. The constants are materialized into SGPRs (inline
; constants like 1.0/2.0/4.0 stay immediates; others become 0x40400000 etc.),
; copied into VGPRs, then each lane is chosen with a v_cmp_eq_u32 /
; v_cndmask_b32 pair. Default (non-amdgpu_ps) calling convention, so the
; function starts with s_waitcnt and returns via s_setpc_b64.
define <8 x float> @dyn_insertelement_v8f32_const_s_v_v(float %val, i32 %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8f32_const_s_v_v:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GPRIDX-NEXT: s_mov_b32 s11, 0x41000000
; GPRIDX-NEXT: s_mov_b32 s10, 0x40e00000
; GPRIDX-NEXT: s_mov_b32 s9, 0x40c00000
; GPRIDX-NEXT: s_mov_b32 s8, 0x40a00000
; GPRIDX-NEXT: s_mov_b32 s7, 4.0
; GPRIDX-NEXT: s_mov_b32 s6, 0x40400000
; GPRIDX-NEXT: s_mov_b32 s5, 2.0
; GPRIDX-NEXT: s_mov_b32 s4, 1.0
; GPRIDX-NEXT: v_mov_b32_e32 v15, s11
; GPRIDX-NEXT: v_mov_b32_e32 v8, s4
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GPRIDX-NEXT: v_mov_b32_e32 v9, s5
; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v8, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1
; GPRIDX-NEXT: v_mov_b32_e32 v10, s6
; GPRIDX-NEXT: v_cndmask_b32_e32 v9, v9, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v1
; GPRIDX-NEXT: v_mov_b32_e32 v11, s7
; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v10, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v1
; GPRIDX-NEXT: v_mov_b32_e32 v12, s8
; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v11, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v1
; GPRIDX-NEXT: v_mov_b32_e32 v13, s9
; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v12, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v1
; GPRIDX-NEXT: v_mov_b32_e32 v14, s10
; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v13, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v1
; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v14, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v1
; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v15, v0, vcc
; GPRIDX-NEXT: v_mov_b32_e32 v0, v8
; GPRIDX-NEXT: v_mov_b32_e32 v1, v9
; GPRIDX-NEXT: s_setpc_b64 s[30:31]
;
; MOVREL-LABEL: dyn_insertelement_v8f32_const_s_v_v:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: s_mov_b32 s11, 0x41000000
; MOVREL-NEXT: s_mov_b32 s4, 1.0
; MOVREL-NEXT: s_mov_b32 s10, 0x40e00000
; MOVREL-NEXT: s_mov_b32 s9, 0x40c00000
; MOVREL-NEXT: s_mov_b32 s8, 0x40a00000
; MOVREL-NEXT: s_mov_b32 s7, 4.0
; MOVREL-NEXT: s_mov_b32 s6, 0x40400000
; MOVREL-NEXT: s_mov_b32 s5, 2.0
; MOVREL-NEXT: v_mov_b32_e32 v15, s11
; MOVREL-NEXT: v_mov_b32_e32 v8, s4
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
; MOVREL-NEXT: v_mov_b32_e32 v9, s5
; MOVREL-NEXT: v_mov_b32_e32 v10, s6
; MOVREL-NEXT: v_mov_b32_e32 v11, s7
; MOVREL-NEXT: v_mov_b32_e32 v12, s8
; MOVREL-NEXT: v_cndmask_b32_e32 v8, v8, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1
; MOVREL-NEXT: v_mov_b32_e32 v13, s9
; MOVREL-NEXT: v_mov_b32_e32 v14, s10
; MOVREL-NEXT: v_cndmask_b32_e32 v9, v9, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v1
; MOVREL-NEXT: v_cndmask_b32_e32 v2, v10, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v1
; MOVREL-NEXT: v_cndmask_b32_e32 v3, v11, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v1
; MOVREL-NEXT: v_cndmask_b32_e32 v4, v12, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v1
; MOVREL-NEXT: v_cndmask_b32_e32 v5, v13, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v1
; MOVREL-NEXT: v_cndmask_b32_e32 v6, v14, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v1
; MOVREL-NEXT: v_mov_b32_e32 v1, v9
; MOVREL-NEXT: v_cndmask_b32_e32 v7, v15, v0, vcc_lo
; MOVREL-NEXT: v_mov_b32_e32 v0, v8
; MOVREL-NEXT: s_setpc_b64 s[30:31]
entry:
%insert = insertelement <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, float %val, i32 %idx
ret <8 x float> %insert
}
| |
; SGPR vector, SGPR value, VGPR (divergent) index. The vector elements are
; shuffled down from s2..s9 to s0..s7 first, then copied to VGPRs for per-lane
; selects. Note the subtarget difference visible in the checks: gfx900 must
; first copy the SGPR value to v7 and use v_cndmask_b32_e32, while gfx1010 can
; feed the SGPR value (s10) directly into v_cndmask_b32_e64 — this is the
; constant-bus-limit difference the FIXME at the top of the file refers to.
define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_s_s_v(<8 x float> inreg %vec, float inreg %val, i32 %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8f32_s_s_v:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_mov_b32 s1, s3
; GPRIDX-NEXT: s_mov_b32 s3, s5
; GPRIDX-NEXT: s_mov_b32 s5, s7
; GPRIDX-NEXT: s_mov_b32 s7, s9
; GPRIDX-NEXT: s_mov_b32 s0, s2
; GPRIDX-NEXT: s_mov_b32 s2, s4
; GPRIDX-NEXT: s_mov_b32 s4, s6
; GPRIDX-NEXT: s_mov_b32 s6, s8
; GPRIDX-NEXT: v_mov_b32_e32 v15, s7
; GPRIDX-NEXT: v_mov_b32_e32 v8, s0
; GPRIDX-NEXT: v_mov_b32_e32 v7, s10
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GPRIDX-NEXT: v_mov_b32_e32 v9, s1
; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v8, v7, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GPRIDX-NEXT: v_mov_b32_e32 v10, s2
; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v9, v7, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0
; GPRIDX-NEXT: v_mov_b32_e32 v11, s3
; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v10, v7, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v0
; GPRIDX-NEXT: v_mov_b32_e32 v12, s4
; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v11, v7, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v0
; GPRIDX-NEXT: v_mov_b32_e32 v13, s5
; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v12, v7, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v0
; GPRIDX-NEXT: v_mov_b32_e32 v14, s6
; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v13, v7, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v0
; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v14, v7, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v0
; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
; GPRIDX-NEXT: v_mov_b32_e32 v0, v8
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v8f32_s_s_v:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_mov_b32 s1, s3
; MOVREL-NEXT: s_mov_b32 s3, s5
; MOVREL-NEXT: s_mov_b32 s5, s7
; MOVREL-NEXT: s_mov_b32 s7, s9
; MOVREL-NEXT: s_mov_b32 s0, s2
; MOVREL-NEXT: s_mov_b32 s2, s4
; MOVREL-NEXT: s_mov_b32 s4, s6
; MOVREL-NEXT: s_mov_b32 s6, s8
; MOVREL-NEXT: v_mov_b32_e32 v15, s7
; MOVREL-NEXT: v_mov_b32_e32 v8, s0
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; MOVREL-NEXT: v_mov_b32_e32 v9, s1
; MOVREL-NEXT: v_mov_b32_e32 v10, s2
; MOVREL-NEXT: v_mov_b32_e32 v11, s3
; MOVREL-NEXT: v_mov_b32_e32 v12, s4
; MOVREL-NEXT: v_cndmask_b32_e64 v8, v8, s10, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
; MOVREL-NEXT: v_mov_b32_e32 v13, s5
; MOVREL-NEXT: v_mov_b32_e32 v14, s6
; MOVREL-NEXT: v_cndmask_b32_e64 v1, v9, s10, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0
; MOVREL-NEXT: v_cndmask_b32_e64 v2, v10, s10, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0
; MOVREL-NEXT: v_cndmask_b32_e64 v3, v11, s10, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0
; MOVREL-NEXT: v_cndmask_b32_e64 v4, v12, s10, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0
; MOVREL-NEXT: v_cndmask_b32_e64 v5, v13, s10, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0
; MOVREL-NEXT: v_cndmask_b32_e64 v6, v14, s10, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v0
; MOVREL-NEXT: v_mov_b32_e32 v0, v8
; MOVREL-NEXT: v_cndmask_b32_e64 v7, v15, s10, vcc_lo
; MOVREL-NEXT: ; return to shader part epilog
entry:
%insert = insertelement <8 x float> %vec, float %val, i32 %idx
ret <8 x float> %insert
}
| |
; SGPR vector, VGPR value, SGPR (uniform) index. Because the value is already
; in a VGPR (v0), both subtargets compare the scalar index against each lane
; number with v_cmp_eq_u32_e64 (scalar operand forces the _e64 encoding) and
; select with v_cndmask_b32_e32.
define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_s_v_s(<8 x float> inreg %vec, float %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8f32_s_v_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_mov_b32 s1, s3
; GPRIDX-NEXT: s_mov_b32 s3, s5
; GPRIDX-NEXT: s_mov_b32 s5, s7
; GPRIDX-NEXT: s_mov_b32 s7, s9
; GPRIDX-NEXT: s_mov_b32 s0, s2
; GPRIDX-NEXT: s_mov_b32 s2, s4
; GPRIDX-NEXT: s_mov_b32 s4, s6
; GPRIDX-NEXT: s_mov_b32 s6, s8
; GPRIDX-NEXT: v_mov_b32_e32 v15, s7
; GPRIDX-NEXT: v_mov_b32_e32 v8, s0
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s10, 0
; GPRIDX-NEXT: v_mov_b32_e32 v9, s1
; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v8, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s10, 1
; GPRIDX-NEXT: v_mov_b32_e32 v10, s2
; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v9, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s10, 2
; GPRIDX-NEXT: v_mov_b32_e32 v11, s3
; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v10, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s10, 3
; GPRIDX-NEXT: v_mov_b32_e32 v12, s4
; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v11, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s10, 4
; GPRIDX-NEXT: v_mov_b32_e32 v13, s5
; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v12, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s10, 5
; GPRIDX-NEXT: v_mov_b32_e32 v14, s6
; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v13, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s10, 6
; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v14, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s10, 7
; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v15, v0, vcc
; GPRIDX-NEXT: v_mov_b32_e32 v0, v8
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v8f32_s_v_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_mov_b32 s1, s3
; MOVREL-NEXT: s_mov_b32 s3, s5
; MOVREL-NEXT: s_mov_b32 s5, s7
; MOVREL-NEXT: s_mov_b32 s7, s9
; MOVREL-NEXT: s_mov_b32 s0, s2
; MOVREL-NEXT: s_mov_b32 s2, s4
; MOVREL-NEXT: s_mov_b32 s4, s6
; MOVREL-NEXT: s_mov_b32 s6, s8
; MOVREL-NEXT: v_mov_b32_e32 v15, s7
; MOVREL-NEXT: v_mov_b32_e32 v8, s0
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s10, 0
; MOVREL-NEXT: v_mov_b32_e32 v9, s1
; MOVREL-NEXT: v_mov_b32_e32 v10, s2
; MOVREL-NEXT: v_mov_b32_e32 v11, s3
; MOVREL-NEXT: v_mov_b32_e32 v12, s4
; MOVREL-NEXT: v_cndmask_b32_e32 v8, v8, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s10, 1
; MOVREL-NEXT: v_mov_b32_e32 v13, s5
; MOVREL-NEXT: v_mov_b32_e32 v14, s6
; MOVREL-NEXT: v_cndmask_b32_e32 v1, v9, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s10, 2
; MOVREL-NEXT: v_cndmask_b32_e32 v2, v10, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s10, 3
; MOVREL-NEXT: v_cndmask_b32_e32 v3, v11, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s10, 4
; MOVREL-NEXT: v_cndmask_b32_e32 v4, v12, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s10, 5
; MOVREL-NEXT: v_cndmask_b32_e32 v5, v13, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s10, 6
; MOVREL-NEXT: v_cndmask_b32_e32 v6, v14, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s10, 7
; MOVREL-NEXT: v_cndmask_b32_e32 v7, v15, v0, vcc_lo
; MOVREL-NEXT: v_mov_b32_e32 v0, v8
; MOVREL-NEXT: ; return to shader part epilog
entry:
%insert = insertelement <8 x float> %vec, float %val, i32 %idx
ret <8 x float> %insert
}
| |
; VGPR vector, SGPR value, SGPR index. gfx900 copies the scalar value into v8
; and selects with vcc; gfx1010 keeps the value in s2 and the comparison
; result in a plain SGPR (s0), using the fully flexible v_cndmask_b32_e64 —
; fewer instructions thanks to the relaxed gfx10 constant-bus rules.
define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_v_s_s(<8 x float> %vec, float inreg %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8f32_v_s_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: v_mov_b32_e32 v8, s2
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s3, 0
; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s3, 1
; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s3, 2
; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s3, 3
; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s3, 4
; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s3, 5
; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s3, 6
; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s3, 7
; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v8f32_v_s_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, s3, 0
; MOVREL-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0
; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, s3, 1
; MOVREL-NEXT: v_cndmask_b32_e64 v1, v1, s2, s0
; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, s3, 2
; MOVREL-NEXT: v_cndmask_b32_e64 v2, v2, s2, s0
; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, s3, 3
; MOVREL-NEXT: v_cndmask_b32_e64 v3, v3, s2, s0
; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, s3, 4
; MOVREL-NEXT: v_cndmask_b32_e64 v4, v4, s2, s0
; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, s3, 5
; MOVREL-NEXT: v_cndmask_b32_e64 v5, v5, s2, s0
; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, s3, 6
; MOVREL-NEXT: v_cndmask_b32_e64 v6, v6, s2, s0
; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, s3, 7
; MOVREL-NEXT: v_cndmask_b32_e64 v7, v7, s2, s0
; MOVREL-NEXT: ; return to shader part epilog
entry:
%insert = insertelement <8 x float> %vec, float %val, i32 %idx
ret <8 x float> %insert
}
| |
; SGPR vector, VGPR value (v0), VGPR index (v1). The SGPR vector is copied
; into VGPRs, then each destination lane is chosen with a v_cmp_eq_u32 against
; the divergent index followed by v_cndmask_b32. Lanes 0 and 1 are computed
; into temporaries (v8/v9) and shuffled back into v0/v1 at the end because
; v0/v1 still hold the live value and index during the chain.
define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_s_v_v(<8 x float> inreg %vec, float %val, i32 %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8f32_s_v_v:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_mov_b32 s1, s3
; GPRIDX-NEXT: s_mov_b32 s3, s5
; GPRIDX-NEXT: s_mov_b32 s5, s7
; GPRIDX-NEXT: s_mov_b32 s7, s9
; GPRIDX-NEXT: s_mov_b32 s0, s2
; GPRIDX-NEXT: s_mov_b32 s2, s4
; GPRIDX-NEXT: s_mov_b32 s4, s6
; GPRIDX-NEXT: s_mov_b32 s6, s8
; GPRIDX-NEXT: v_mov_b32_e32 v15, s7
; GPRIDX-NEXT: v_mov_b32_e32 v8, s0
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GPRIDX-NEXT: v_mov_b32_e32 v9, s1
; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v8, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1
; GPRIDX-NEXT: v_mov_b32_e32 v10, s2
; GPRIDX-NEXT: v_cndmask_b32_e32 v9, v9, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v1
; GPRIDX-NEXT: v_mov_b32_e32 v11, s3
; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v10, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v1
; GPRIDX-NEXT: v_mov_b32_e32 v12, s4
; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v11, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v1
; GPRIDX-NEXT: v_mov_b32_e32 v13, s5
; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v12, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v1
; GPRIDX-NEXT: v_mov_b32_e32 v14, s6
; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v13, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v1
; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v14, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v1
; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v15, v0, vcc
; GPRIDX-NEXT: v_mov_b32_e32 v0, v8
; GPRIDX-NEXT: v_mov_b32_e32 v1, v9
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v8f32_s_v_v:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_mov_b32 s1, s3
; MOVREL-NEXT: s_mov_b32 s3, s5
; MOVREL-NEXT: s_mov_b32 s5, s7
; MOVREL-NEXT: s_mov_b32 s7, s9
; MOVREL-NEXT: s_mov_b32 s0, s2
; MOVREL-NEXT: s_mov_b32 s2, s4
; MOVREL-NEXT: s_mov_b32 s4, s6
; MOVREL-NEXT: s_mov_b32 s6, s8
; MOVREL-NEXT: v_mov_b32_e32 v15, s7
; MOVREL-NEXT: v_mov_b32_e32 v8, s0
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
; MOVREL-NEXT: v_mov_b32_e32 v9, s1
; MOVREL-NEXT: v_mov_b32_e32 v10, s2
; MOVREL-NEXT: v_mov_b32_e32 v11, s3
; MOVREL-NEXT: v_mov_b32_e32 v12, s4
; MOVREL-NEXT: v_cndmask_b32_e32 v8, v8, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1
; MOVREL-NEXT: v_mov_b32_e32 v13, s5
; MOVREL-NEXT: v_mov_b32_e32 v14, s6
; MOVREL-NEXT: v_cndmask_b32_e32 v9, v9, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v1
; MOVREL-NEXT: v_cndmask_b32_e32 v2, v10, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v1
; MOVREL-NEXT: v_cndmask_b32_e32 v3, v11, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v1
; MOVREL-NEXT: v_cndmask_b32_e32 v4, v12, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v1
; MOVREL-NEXT: v_cndmask_b32_e32 v5, v13, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v1
; MOVREL-NEXT: v_cndmask_b32_e32 v6, v14, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v1
; MOVREL-NEXT: v_mov_b32_e32 v1, v9
; MOVREL-NEXT: v_cndmask_b32_e32 v7, v15, v0, vcc_lo
; MOVREL-NEXT: v_mov_b32_e32 v0, v8
; MOVREL-NEXT: ; return to shader part epilog
entry:
%insert = insertelement <8 x float> %vec, float %val, i32 %idx
ret <8 x float> %insert
}
| |
; VGPR vector, SGPR value, VGPR index. As in the s_s_v case, gfx900 must copy
; the scalar value into a VGPR (v9) before the cndmask chain, while gfx1010
; reads s2 directly as the v_cndmask_b32_e64 source operand.
define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_v_s_v(<8 x float> %vec, float inreg %val, i32 %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8f32_v_s_v:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: v_mov_b32_e32 v9, s2
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v9, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8
; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v8
; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v8
; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v8
; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v8
; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v8
; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v8
; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v8f32_v_s_v:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v8
; MOVREL-NEXT: v_cndmask_b32_e64 v0, v0, s2, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v8
; MOVREL-NEXT: v_cndmask_b32_e64 v1, v1, s2, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v8
; MOVREL-NEXT: v_cndmask_b32_e64 v2, v2, s2, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v8
; MOVREL-NEXT: v_cndmask_b32_e64 v3, v3, s2, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v8
; MOVREL-NEXT: v_cndmask_b32_e64 v4, v4, s2, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v8
; MOVREL-NEXT: v_cndmask_b32_e64 v5, v5, s2, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v8
; MOVREL-NEXT: v_cndmask_b32_e64 v6, v6, s2, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v8
; MOVREL-NEXT: v_cndmask_b32_e64 v7, v7, s2, vcc_lo
; MOVREL-NEXT: ; return to shader part epilog
entry:
%insert = insertelement <8 x float> %vec, float %val, i32 %idx
ret <8 x float> %insert
}
| |
; VGPR vector, VGPR value (v8), SGPR index. Both subtargets compare the scalar
; index against each lane constant with v_cmp_eq_u32_e64 and select the VGPR
; value in place with v_cndmask_b32_e32 — identical structure, differing only
; in vcc vs. vcc_lo (wave64 gfx900 vs. wave32 gfx1010).
define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_v_v_s(<8 x float> %vec, float %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8f32_v_v_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 0
; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 1
; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 2
; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 3
; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 4
; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 5
; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 6
; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 7
; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v8f32_v_v_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 0
; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 1
; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 2
; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 3
; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 4
; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 5
; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 6
; MOVREL-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 7
; MOVREL-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc_lo
; MOVREL-NEXT: ; return to shader part epilog
entry:
%insert = insertelement <8 x float> %vec, float %val, i32 %idx
ret <8 x float> %insert
}
| |
; Pointer-element variant of v_v_s: inserts an addrspace(3) pointer at a
; uniform index, then ptrtoint + bitcast so the result can be returned as
; <8 x float> from an amdgpu_ps shader. The casts are free at the machine
; level, so codegen matches the v8f32_v_v_s test exactly.
define amdgpu_ps <8 x float> @dyn_insertelement_v8p3i8_v_v_s(<8 x i8 addrspace(3)*> %vec, i8 addrspace(3)* %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8p3i8_v_v_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 0
; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 1
; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 2
; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 3
; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 4
; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 5
; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 6
; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 7
; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v8p3i8_v_v_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 0
; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 1
; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 2
; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 3
; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 4
; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 5
; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 6
; MOVREL-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 7
; MOVREL-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc_lo
; MOVREL-NEXT: ; return to shader part epilog
entry:
%insert = insertelement <8 x i8 addrspace(3)*> %vec, i8 addrspace(3)* %val, i32 %idx
%cast.0 = ptrtoint <8 x i8 addrspace(3)*> %insert to <8 x i32>
%cast.1 = bitcast <8 x i32> %cast.0 to <8 x float>
ret <8 x float> %cast.1
}
| |
; Fully divergent case: vector in v0-v7, value in v8, index in v9. Straight
; per-lane v_cmp_eq_u32 (immediate vs. v9) + v_cndmask_b32 chain, updating
; each result register in place; no SGPR traffic at all.
define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_v_v_v(<8 x float> %vec, float %val, i32 %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8f32_v_v_v:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9
; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v9
; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v9
; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v9
; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v9
; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v9
; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v9
; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v9
; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v8f32_v_v_v:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v9
; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v9
; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v9
; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v9
; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v9
; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v9
; MOVREL-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v9
; MOVREL-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc_lo
; MOVREL-NEXT: ; return to shader part epilog
entry:
%insert = insertelement <8 x float> %vec, float %val, i32 %idx
ret <8 x float> %insert
}
| |
; 64-bit elements, all-SGPR operands. Unlike the 32-bit cases, this lowers to
; a real relative-move: the vector is copied down to s[0:15], the index goes
; into m0, and a single s_movreld_b64 writes the value pair s[18:19] into the
; selected register pair. gfx900 needs an s_nop between the m0 write and
; s_movreld; gfx1010 schedules the m0 write earlier instead.
define amdgpu_ps <8 x i64> @dyn_insertelement_v8i64_s_s_s(<8 x i64> inreg %vec, i64 inreg %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8i64_s_s_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_mov_b32 s0, s2
; GPRIDX-NEXT: s_mov_b32 s1, s3
; GPRIDX-NEXT: s_mov_b32 s2, s4
; GPRIDX-NEXT: s_mov_b32 s3, s5
; GPRIDX-NEXT: s_mov_b32 s4, s6
; GPRIDX-NEXT: s_mov_b32 s5, s7
; GPRIDX-NEXT: s_mov_b32 s6, s8
; GPRIDX-NEXT: s_mov_b32 s7, s9
; GPRIDX-NEXT: s_mov_b32 s8, s10
; GPRIDX-NEXT: s_mov_b32 s9, s11
; GPRIDX-NEXT: s_mov_b32 s10, s12
; GPRIDX-NEXT: s_mov_b32 s11, s13
; GPRIDX-NEXT: s_mov_b32 s12, s14
; GPRIDX-NEXT: s_mov_b32 s13, s15
; GPRIDX-NEXT: s_mov_b32 s14, s16
; GPRIDX-NEXT: s_mov_b32 s15, s17
; GPRIDX-NEXT: s_mov_b32 m0, s20
; GPRIDX-NEXT: s_nop 0
; GPRIDX-NEXT: s_movreld_b64 s[0:1], s[18:19]
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v8i64_s_s_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_mov_b32 s0, s2
; MOVREL-NEXT: s_mov_b32 s1, s3
; MOVREL-NEXT: s_mov_b32 m0, s20
; MOVREL-NEXT: s_mov_b32 s2, s4
; MOVREL-NEXT: s_mov_b32 s3, s5
; MOVREL-NEXT: s_mov_b32 s4, s6
; MOVREL-NEXT: s_mov_b32 s5, s7
; MOVREL-NEXT: s_mov_b32 s6, s8
; MOVREL-NEXT: s_mov_b32 s7, s9
; MOVREL-NEXT: s_mov_b32 s8, s10
; MOVREL-NEXT: s_mov_b32 s9, s11
; MOVREL-NEXT: s_mov_b32 s10, s12
; MOVREL-NEXT: s_mov_b32 s11, s13
; MOVREL-NEXT: s_mov_b32 s12, s14
; MOVREL-NEXT: s_mov_b32 s13, s15
; MOVREL-NEXT: s_mov_b32 s14, s16
; MOVREL-NEXT: s_mov_b32 s15, s17
; MOVREL-NEXT: s_movreld_b64 s[0:1], s[18:19]
; MOVREL-NEXT: ; return to shader part epilog
entry:
%insert = insertelement <8 x i64> %vec, i64 %val, i32 %idx
ret <8 x i64> %insert
}
| |
; 64-bit addrspace(1) (global) pointer elements, all-SGPR operands. Same
; lowering as the v8i64 test above: shuffle the vector into s[0:15], index in
; m0, then one s_movreld_b64 of the value pair s[18:19] — pointer elements do
; not change the codegen.
define amdgpu_ps <8 x i8 addrspace(1)*> @dyn_insertelement_v8p1i8_s_s_s(<8 x i8 addrspace(1)*> inreg %vec, i8 addrspace(1)* inreg %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8p1i8_s_s_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_mov_b32 s0, s2
; GPRIDX-NEXT: s_mov_b32 s1, s3
; GPRIDX-NEXT: s_mov_b32 s2, s4
; GPRIDX-NEXT: s_mov_b32 s3, s5
; GPRIDX-NEXT: s_mov_b32 s4, s6
; GPRIDX-NEXT: s_mov_b32 s5, s7
; GPRIDX-NEXT: s_mov_b32 s6, s8
; GPRIDX-NEXT: s_mov_b32 s7, s9
; GPRIDX-NEXT: s_mov_b32 s8, s10
; GPRIDX-NEXT: s_mov_b32 s9, s11
; GPRIDX-NEXT: s_mov_b32 s10, s12
; GPRIDX-NEXT: s_mov_b32 s11, s13
; GPRIDX-NEXT: s_mov_b32 s12, s14
; GPRIDX-NEXT: s_mov_b32 s13, s15
; GPRIDX-NEXT: s_mov_b32 s14, s16
; GPRIDX-NEXT: s_mov_b32 s15, s17
; GPRIDX-NEXT: s_mov_b32 m0, s20
; GPRIDX-NEXT: s_nop 0
; GPRIDX-NEXT: s_movreld_b64 s[0:1], s[18:19]
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v8p1i8_s_s_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_mov_b32 s0, s2
; MOVREL-NEXT: s_mov_b32 s1, s3
; MOVREL-NEXT: s_mov_b32 m0, s20
; MOVREL-NEXT: s_mov_b32 s2, s4
; MOVREL-NEXT: s_mov_b32 s3, s5
; MOVREL-NEXT: s_mov_b32 s4, s6
; MOVREL-NEXT: s_mov_b32 s5, s7
; MOVREL-NEXT: s_mov_b32 s6, s8
; MOVREL-NEXT: s_mov_b32 s7, s9
; MOVREL-NEXT: s_mov_b32 s8, s10
; MOVREL-NEXT: s_mov_b32 s9, s11
; MOVREL-NEXT: s_mov_b32 s10, s12
; MOVREL-NEXT: s_mov_b32 s11, s13
; MOVREL-NEXT: s_mov_b32 s12, s14
; MOVREL-NEXT: s_mov_b32 s13, s15
; MOVREL-NEXT: s_mov_b32 s14, s16
; MOVREL-NEXT: s_mov_b32 s15, s17
; MOVREL-NEXT: s_movreld_b64 s[0:1], s[18:19]
; MOVREL-NEXT: ; return to shader part epilog
entry:
%insert = insertelement <8 x i8 addrspace(1)*> %vec, i8 addrspace(1)* %val, i32 %idx
ret <8 x i8 addrspace(1)*> %insert
}
| |
; Dynamic insertelement of a VGPR double at a VGPR index into a CONSTANT
; <8 x double> (1.0 .. 8.0). With a divergent index neither movrel nor
; gpr_idx applies, so per the autogenerated checks below both subtargets
; materialize the constant vector and expand the insert as a per-element
; v_cmp_eq_u32 + v_cndmask chain (64-bit wave masks on gfx900, 32-bit
; vcc_lo/sN masks on gfx1010). The result is split into four <2 x double>
; pieces and stored volatile so the whole vector stays live.
define void @dyn_insertelement_v8f64_const_s_v_v(double %val, i32 %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8f64_const_s_v_v:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GPRIDX-NEXT: s_mov_b32 s18, 0
; GPRIDX-NEXT: s_mov_b64 s[4:5], 1.0
; GPRIDX-NEXT: s_mov_b32 s19, 0x40200000
; GPRIDX-NEXT: s_mov_b32 s17, 0x401c0000
; GPRIDX-NEXT: s_mov_b32 s16, s18
; GPRIDX-NEXT: s_mov_b32 s15, 0x40180000
; GPRIDX-NEXT: s_mov_b32 s14, s18
; GPRIDX-NEXT: s_mov_b32 s13, 0x40140000
; GPRIDX-NEXT: s_mov_b32 s12, s18
; GPRIDX-NEXT: s_mov_b64 s[10:11], 4.0
; GPRIDX-NEXT: s_mov_b32 s9, 0x40080000
; GPRIDX-NEXT: s_mov_b32 s8, s18
; GPRIDX-NEXT: s_mov_b64 s[6:7], 2.0
; GPRIDX-NEXT: v_mov_b32_e32 v3, s4
; GPRIDX-NEXT: v_mov_b32_e32 v4, s5
; GPRIDX-NEXT: v_mov_b32_e32 v5, s6
; GPRIDX-NEXT: v_mov_b32_e32 v6, s7
; GPRIDX-NEXT: v_mov_b32_e32 v7, s8
; GPRIDX-NEXT: v_mov_b32_e32 v8, s9
; GPRIDX-NEXT: v_mov_b32_e32 v9, s10
; GPRIDX-NEXT: v_mov_b32_e32 v10, s11
; GPRIDX-NEXT: v_mov_b32_e32 v11, s12
; GPRIDX-NEXT: v_mov_b32_e32 v12, s13
; GPRIDX-NEXT: v_mov_b32_e32 v13, s14
; GPRIDX-NEXT: v_mov_b32_e32 v14, s15
; GPRIDX-NEXT: v_mov_b32_e32 v15, s16
; GPRIDX-NEXT: v_mov_b32_e32 v16, s17
; GPRIDX-NEXT: v_mov_b32_e32 v17, s18
; GPRIDX-NEXT: v_mov_b32_e32 v18, s19
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[16:17], 0, v2
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], 2, v2
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], 3, v2
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[8:9], 4, v2
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[10:11], 5, v2
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[12:13], 6, v2
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[14:15], 7, v2
; GPRIDX-NEXT: v_cndmask_b32_e64 v3, v3, v0, s[16:17]
; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v0, vcc
; GPRIDX-NEXT: v_cndmask_b32_e64 v4, v4, v1, s[16:17]
; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v1, vcc
; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v7, v0, s[4:5]
; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v9, v0, s[6:7]
; GPRIDX-NEXT: v_cndmask_b32_e64 v11, v11, v0, s[8:9]
; GPRIDX-NEXT: v_cndmask_b32_e64 v13, v13, v0, s[10:11]
; GPRIDX-NEXT: v_cndmask_b32_e64 v15, v15, v0, s[12:13]
; GPRIDX-NEXT: v_cndmask_b32_e64 v17, v17, v0, s[14:15]
; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v1, s[4:5]
; GPRIDX-NEXT: v_cndmask_b32_e64 v10, v10, v1, s[6:7]
; GPRIDX-NEXT: v_cndmask_b32_e64 v12, v12, v1, s[8:9]
; GPRIDX-NEXT: v_cndmask_b32_e64 v14, v14, v1, s[10:11]
; GPRIDX-NEXT: v_cndmask_b32_e64 v16, v16, v1, s[12:13]
; GPRIDX-NEXT: v_cndmask_b32_e64 v18, v18, v1, s[14:15]
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[7:10], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[11:14], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[15:18], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: s_setpc_b64 s[30:31]
;
; MOVREL-LABEL: dyn_insertelement_v8f64_const_s_v_v:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: s_mov_b32 s18, 0
; MOVREL-NEXT: s_mov_b64 s[4:5], 1.0
; MOVREL-NEXT: s_mov_b32 s19, 0x40200000
; MOVREL-NEXT: s_mov_b32 s17, 0x401c0000
; MOVREL-NEXT: s_mov_b32 s16, s18
; MOVREL-NEXT: s_mov_b32 s15, 0x40180000
; MOVREL-NEXT: s_mov_b32 s14, s18
; MOVREL-NEXT: s_mov_b32 s13, 0x40140000
; MOVREL-NEXT: s_mov_b32 s12, s18
; MOVREL-NEXT: s_mov_b64 s[10:11], 4.0
; MOVREL-NEXT: s_mov_b32 s9, 0x40080000
; MOVREL-NEXT: s_mov_b32 s8, s18
; MOVREL-NEXT: s_mov_b64 s[6:7], 2.0
; MOVREL-NEXT: v_mov_b32_e32 v3, s4
; MOVREL-NEXT: v_mov_b32_e32 v4, s5
; MOVREL-NEXT: v_mov_b32_e32 v5, s6
; MOVREL-NEXT: v_mov_b32_e32 v6, s7
; MOVREL-NEXT: v_mov_b32_e32 v7, s8
; MOVREL-NEXT: v_mov_b32_e32 v8, s9
; MOVREL-NEXT: v_mov_b32_e32 v9, s10
; MOVREL-NEXT: v_mov_b32_e32 v10, s11
; MOVREL-NEXT: v_mov_b32_e32 v11, s12
; MOVREL-NEXT: v_mov_b32_e32 v12, s13
; MOVREL-NEXT: v_mov_b32_e32 v13, s14
; MOVREL-NEXT: v_mov_b32_e32 v14, s15
; MOVREL-NEXT: v_mov_b32_e32 v15, s16
; MOVREL-NEXT: v_mov_b32_e32 v16, s17
; MOVREL-NEXT: v_mov_b32_e32 v17, s18
; MOVREL-NEXT: v_mov_b32_e32 v18, s19
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
; MOVREL-NEXT: v_cmp_eq_u32_e64 s4, 1, v2
; MOVREL-NEXT: v_cmp_eq_u32_e64 s5, 3, v2
; MOVREL-NEXT: v_cmp_eq_u32_e64 s10, 2, v2
; MOVREL-NEXT: v_cmp_eq_u32_e64 s6, 4, v2
; MOVREL-NEXT: v_cmp_eq_u32_e64 s7, 5, v2
; MOVREL-NEXT: v_cmp_eq_u32_e64 s8, 6, v2
; MOVREL-NEXT: v_cmp_eq_u32_e64 s9, 7, v2
; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc_lo
; MOVREL-NEXT: v_cndmask_b32_e64 v5, v5, v0, s4
; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v1, vcc_lo
; MOVREL-NEXT: v_cndmask_b32_e64 v6, v6, v1, s4
; MOVREL-NEXT: v_cndmask_b32_e64 v7, v7, v0, s10
; MOVREL-NEXT: v_cndmask_b32_e64 v9, v9, v0, s5
; MOVREL-NEXT: v_cndmask_b32_e64 v8, v8, v1, s10
; MOVREL-NEXT: v_cndmask_b32_e64 v10, v10, v1, s5
; MOVREL-NEXT: v_cndmask_b32_e64 v11, v11, v0, s6
; MOVREL-NEXT: v_cndmask_b32_e64 v13, v13, v0, s7
; MOVREL-NEXT: v_cndmask_b32_e64 v12, v12, v1, s6
; MOVREL-NEXT: v_cndmask_b32_e64 v14, v14, v1, s7
; MOVREL-NEXT: v_cndmask_b32_e64 v15, v15, v0, s8
; MOVREL-NEXT: v_cndmask_b32_e64 v17, v17, v0, s9
; MOVREL-NEXT: v_cndmask_b32_e64 v16, v16, v1, s8
; MOVREL-NEXT: v_cndmask_b32_e64 v18, v18, v1, s9
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[7:10], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[11:14], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[15:18], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: s_setpc_b64 s[30:31]
entry:
  %insert = insertelement <8 x double> <double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0, double 7.0, double 8.0>, double %val, i32 %idx
  %vec.0 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 0, i32 1>
  %vec.1 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 2, i32 3>
  %vec.2 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 4, i32 5>
  %vec.3 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 6, i32 7>
  store volatile <2 x double> %vec.0, <2 x double> addrspace(1)* undef
  store volatile <2 x double> %vec.1, <2 x double> addrspace(1)* undef
  store volatile <2 x double> %vec.2, <2 x double> addrspace(1)* undef
  store volatile <2 x double> %vec.3, <2 x double> addrspace(1)* undef
  ret void
}
| |
; Dynamic insertelement into <8 x double> with SGPR vector and value but a
; VGPR (divergent) index, per the _s_s_v suffix. The divergent index rules
; out movrel/gpr_idx, so the autogenerated checks below show the vector
; copied into VGPRs and the insert expanded to a v_cmp_eq_u32 + v_cndmask
; chain on both subtargets; the result is stored volatile in four
; <2 x double> pieces.
define amdgpu_ps void @dyn_insertelement_v8f64_s_s_v(<8 x double> inreg %vec, double inreg %val, i32 %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8f64_s_s_v:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_mov_b32 s1, s3
; GPRIDX-NEXT: s_mov_b32 s3, s5
; GPRIDX-NEXT: s_mov_b32 s5, s7
; GPRIDX-NEXT: s_mov_b32 s7, s9
; GPRIDX-NEXT: s_mov_b32 s9, s11
; GPRIDX-NEXT: s_mov_b32 s11, s13
; GPRIDX-NEXT: s_mov_b32 s13, s15
; GPRIDX-NEXT: s_mov_b32 s15, s17
; GPRIDX-NEXT: s_mov_b32 s0, s2
; GPRIDX-NEXT: s_mov_b32 s2, s4
; GPRIDX-NEXT: s_mov_b32 s4, s6
; GPRIDX-NEXT: s_mov_b32 s6, s8
; GPRIDX-NEXT: s_mov_b32 s8, s10
; GPRIDX-NEXT: s_mov_b32 s10, s12
; GPRIDX-NEXT: s_mov_b32 s12, s14
; GPRIDX-NEXT: s_mov_b32 s14, s16
; GPRIDX-NEXT: v_mov_b32_e32 v16, s15
; GPRIDX-NEXT: v_mov_b32_e32 v15, s14
; GPRIDX-NEXT: v_mov_b32_e32 v14, s13
; GPRIDX-NEXT: v_mov_b32_e32 v13, s12
; GPRIDX-NEXT: v_mov_b32_e32 v12, s11
; GPRIDX-NEXT: v_mov_b32_e32 v11, s10
; GPRIDX-NEXT: v_mov_b32_e32 v10, s9
; GPRIDX-NEXT: v_mov_b32_e32 v9, s8
; GPRIDX-NEXT: v_mov_b32_e32 v8, s7
; GPRIDX-NEXT: v_mov_b32_e32 v7, s6
; GPRIDX-NEXT: v_mov_b32_e32 v6, s5
; GPRIDX-NEXT: v_mov_b32_e32 v5, s4
; GPRIDX-NEXT: v_mov_b32_e32 v4, s3
; GPRIDX-NEXT: v_mov_b32_e32 v3, s2
; GPRIDX-NEXT: v_mov_b32_e32 v2, s1
; GPRIDX-NEXT: v_mov_b32_e32 v1, s0
; GPRIDX-NEXT: v_mov_b32_e32 v17, s18
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v0
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v0
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], 4, v0
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], 5, v0
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[8:9], 6, v0
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[10:11], 7, v0
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[12:13], 0, v0
; GPRIDX-NEXT: v_mov_b32_e32 v0, s19
; GPRIDX-NEXT: v_cndmask_b32_e64 v1, v1, v17, s[12:13]
; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
; GPRIDX-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[12:13]
; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc
; GPRIDX-NEXT: v_cndmask_b32_e64 v5, v5, v17, s[0:1]
; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v7, v17, s[2:3]
; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v9, v17, s[4:5]
; GPRIDX-NEXT: v_cndmask_b32_e64 v11, v11, v17, s[6:7]
; GPRIDX-NEXT: v_cndmask_b32_e64 v13, v13, v17, s[8:9]
; GPRIDX-NEXT: v_cndmask_b32_e64 v15, v15, v17, s[10:11]
; GPRIDX-NEXT: v_cndmask_b32_e64 v6, v6, v0, s[0:1]
; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v0, s[2:3]
; GPRIDX-NEXT: v_cndmask_b32_e64 v10, v10, v0, s[4:5]
; GPRIDX-NEXT: v_cndmask_b32_e64 v12, v12, v0, s[6:7]
; GPRIDX-NEXT: v_cndmask_b32_e64 v14, v14, v0, s[8:9]
; GPRIDX-NEXT: v_cndmask_b32_e64 v16, v16, v0, s[10:11]
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[1:4], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[5:8], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[9:12], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[13:16], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: s_endpgm
;
; MOVREL-LABEL: dyn_insertelement_v8f64_s_s_v:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_mov_b32 s1, s3
; MOVREL-NEXT: s_mov_b32 s3, s5
; MOVREL-NEXT: s_mov_b32 s5, s7
; MOVREL-NEXT: s_mov_b32 s7, s9
; MOVREL-NEXT: s_mov_b32 s9, s11
; MOVREL-NEXT: s_mov_b32 s11, s13
; MOVREL-NEXT: s_mov_b32 s13, s15
; MOVREL-NEXT: s_mov_b32 s15, s17
; MOVREL-NEXT: s_mov_b32 s0, s2
; MOVREL-NEXT: s_mov_b32 s2, s4
; MOVREL-NEXT: s_mov_b32 s4, s6
; MOVREL-NEXT: s_mov_b32 s6, s8
; MOVREL-NEXT: s_mov_b32 s8, s10
; MOVREL-NEXT: s_mov_b32 s10, s12
; MOVREL-NEXT: s_mov_b32 s12, s14
; MOVREL-NEXT: s_mov_b32 s14, s16
; MOVREL-NEXT: v_mov_b32_e32 v16, s15
; MOVREL-NEXT: v_mov_b32_e32 v2, s1
; MOVREL-NEXT: v_mov_b32_e32 v1, s0
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; MOVREL-NEXT: v_mov_b32_e32 v15, s14
; MOVREL-NEXT: v_mov_b32_e32 v14, s13
; MOVREL-NEXT: v_mov_b32_e32 v13, s12
; MOVREL-NEXT: v_mov_b32_e32 v12, s11
; MOVREL-NEXT: v_mov_b32_e32 v11, s10
; MOVREL-NEXT: v_mov_b32_e32 v10, s9
; MOVREL-NEXT: v_mov_b32_e32 v9, s8
; MOVREL-NEXT: v_mov_b32_e32 v8, s7
; MOVREL-NEXT: v_mov_b32_e32 v7, s6
; MOVREL-NEXT: v_mov_b32_e32 v6, s5
; MOVREL-NEXT: v_mov_b32_e32 v5, s4
; MOVREL-NEXT: v_mov_b32_e32 v4, s3
; MOVREL-NEXT: v_mov_b32_e32 v3, s2
; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, 1, v0
; MOVREL-NEXT: v_cndmask_b32_e64 v1, v1, s18, vcc_lo
; MOVREL-NEXT: v_cndmask_b32_e64 v2, v2, s19, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0
; MOVREL-NEXT: v_cmp_eq_u32_e64 s1, 2, v0
; MOVREL-NEXT: v_cndmask_b32_e64 v3, v3, s18, s0
; MOVREL-NEXT: v_cndmask_b32_e64 v4, v4, s19, s0
; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, 4, v0
; MOVREL-NEXT: v_cmp_eq_u32_e64 s2, 5, v0
; MOVREL-NEXT: v_cmp_eq_u32_e64 s3, 6, v0
; MOVREL-NEXT: v_cmp_eq_u32_e64 s4, 7, v0
; MOVREL-NEXT: v_cndmask_b32_e64 v5, v5, s18, s1
; MOVREL-NEXT: v_cndmask_b32_e64 v6, v6, s19, s1
; MOVREL-NEXT: v_cndmask_b32_e64 v7, v7, s18, vcc_lo
; MOVREL-NEXT: v_cndmask_b32_e64 v8, v8, s19, vcc_lo
; MOVREL-NEXT: v_cndmask_b32_e64 v9, v9, s18, s0
; MOVREL-NEXT: v_cndmask_b32_e64 v10, v10, s19, s0
; MOVREL-NEXT: v_cndmask_b32_e64 v11, v11, s18, s2
; MOVREL-NEXT: v_cndmask_b32_e64 v12, v12, s19, s2
; MOVREL-NEXT: v_cndmask_b32_e64 v13, v13, s18, s3
; MOVREL-NEXT: v_cndmask_b32_e64 v14, v14, s19, s3
; MOVREL-NEXT: v_cndmask_b32_e64 v15, v15, s18, s4
; MOVREL-NEXT: v_cndmask_b32_e64 v16, v16, s19, s4
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[1:4], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[5:8], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[9:12], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[13:16], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: s_endpgm
entry:
  %insert = insertelement <8 x double> %vec, double %val, i32 %idx
  %vec.0 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 0, i32 1>
  %vec.1 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 2, i32 3>
  %vec.2 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 4, i32 5>
  %vec.3 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 6, i32 7>
  store volatile <2 x double> %vec.0, <2 x double> addrspace(1)* undef
  store volatile <2 x double> %vec.1, <2 x double> addrspace(1)* undef
  store volatile <2 x double> %vec.2, <2 x double> addrspace(1)* undef
  store volatile <2 x double> %vec.3, <2 x double> addrspace(1)* undef
  ret void
}
| |
; Dynamic insertelement into <8 x double> with SGPR vector, VGPR value,
; and SGPR (uniform) index, per the _s_v_s suffix. Per the autogenerated
; checks below, the vector is copied to VGPRs and the two 32-bit halves of
; the value are written via indexed VGPR writes: gfx900 uses
; s_set_gpr_idx_on/off around two v_mov writes, gfx1010 uses m0 plus two
; v_movreld_b32 writes; the index is pre-scaled by 2 (s_lshl_b32 ..., 1)
; because each double occupies two 32-bit lanes.
define amdgpu_ps void @dyn_insertelement_v8f64_s_v_s(<8 x double> inreg %vec, double %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8f64_s_v_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_mov_b32 s1, s3
; GPRIDX-NEXT: s_mov_b32 s3, s5
; GPRIDX-NEXT: s_mov_b32 s5, s7
; GPRIDX-NEXT: s_mov_b32 s7, s9
; GPRIDX-NEXT: s_mov_b32 s9, s11
; GPRIDX-NEXT: s_mov_b32 s11, s13
; GPRIDX-NEXT: s_mov_b32 s13, s15
; GPRIDX-NEXT: s_mov_b32 s15, s17
; GPRIDX-NEXT: s_mov_b32 s0, s2
; GPRIDX-NEXT: s_mov_b32 s2, s4
; GPRIDX-NEXT: s_mov_b32 s4, s6
; GPRIDX-NEXT: s_mov_b32 s6, s8
; GPRIDX-NEXT: s_mov_b32 s8, s10
; GPRIDX-NEXT: s_mov_b32 s10, s12
; GPRIDX-NEXT: s_mov_b32 s12, s14
; GPRIDX-NEXT: s_mov_b32 s14, s16
; GPRIDX-NEXT: v_mov_b32_e32 v17, s15
; GPRIDX-NEXT: v_mov_b32_e32 v16, s14
; GPRIDX-NEXT: v_mov_b32_e32 v15, s13
; GPRIDX-NEXT: v_mov_b32_e32 v14, s12
; GPRIDX-NEXT: v_mov_b32_e32 v13, s11
; GPRIDX-NEXT: v_mov_b32_e32 v12, s10
; GPRIDX-NEXT: v_mov_b32_e32 v11, s9
; GPRIDX-NEXT: v_mov_b32_e32 v10, s8
; GPRIDX-NEXT: v_mov_b32_e32 v9, s7
; GPRIDX-NEXT: v_mov_b32_e32 v8, s6
; GPRIDX-NEXT: v_mov_b32_e32 v7, s5
; GPRIDX-NEXT: v_mov_b32_e32 v6, s4
; GPRIDX-NEXT: v_mov_b32_e32 v5, s3
; GPRIDX-NEXT: v_mov_b32_e32 v4, s2
; GPRIDX-NEXT: v_mov_b32_e32 v3, s1
; GPRIDX-NEXT: v_mov_b32_e32 v2, s0
; GPRIDX-NEXT: s_lshl_b32 s0, s18, 1
; GPRIDX-NEXT: s_set_gpr_idx_on s0, gpr_idx(DST)
; GPRIDX-NEXT: v_mov_b32_e32 v2, v0
; GPRIDX-NEXT: v_mov_b32_e32 v3, v1
; GPRIDX-NEXT: s_set_gpr_idx_off
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[6:9], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[10:13], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[14:17], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: s_endpgm
;
; MOVREL-LABEL: dyn_insertelement_v8f64_s_v_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_mov_b32 s1, s3
; MOVREL-NEXT: s_mov_b32 s3, s5
; MOVREL-NEXT: s_mov_b32 s5, s7
; MOVREL-NEXT: s_mov_b32 s7, s9
; MOVREL-NEXT: s_mov_b32 s9, s11
; MOVREL-NEXT: s_mov_b32 s11, s13
; MOVREL-NEXT: s_mov_b32 s13, s15
; MOVREL-NEXT: s_mov_b32 s15, s17
; MOVREL-NEXT: s_mov_b32 s0, s2
; MOVREL-NEXT: s_mov_b32 s2, s4
; MOVREL-NEXT: s_mov_b32 s4, s6
; MOVREL-NEXT: s_mov_b32 s6, s8
; MOVREL-NEXT: s_mov_b32 s8, s10
; MOVREL-NEXT: s_mov_b32 s10, s12
; MOVREL-NEXT: s_mov_b32 s12, s14
; MOVREL-NEXT: s_mov_b32 s14, s16
; MOVREL-NEXT: v_mov_b32_e32 v17, s15
; MOVREL-NEXT: v_mov_b32_e32 v2, s0
; MOVREL-NEXT: s_lshl_b32 m0, s18, 1
; MOVREL-NEXT: v_mov_b32_e32 v16, s14
; MOVREL-NEXT: v_mov_b32_e32 v15, s13
; MOVREL-NEXT: v_mov_b32_e32 v14, s12
; MOVREL-NEXT: v_mov_b32_e32 v13, s11
; MOVREL-NEXT: v_mov_b32_e32 v12, s10
; MOVREL-NEXT: v_mov_b32_e32 v11, s9
; MOVREL-NEXT: v_mov_b32_e32 v10, s8
; MOVREL-NEXT: v_mov_b32_e32 v9, s7
; MOVREL-NEXT: v_mov_b32_e32 v8, s6
; MOVREL-NEXT: v_mov_b32_e32 v7, s5
; MOVREL-NEXT: v_mov_b32_e32 v6, s4
; MOVREL-NEXT: v_mov_b32_e32 v5, s3
; MOVREL-NEXT: v_mov_b32_e32 v4, s2
; MOVREL-NEXT: v_mov_b32_e32 v3, s1
; MOVREL-NEXT: v_movreld_b32_e32 v2, v0
; MOVREL-NEXT: v_movreld_b32_e32 v3, v1
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[6:9], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[10:13], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[14:17], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: s_endpgm
entry:
  %insert = insertelement <8 x double> %vec, double %val, i32 %idx
  %vec.0 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 0, i32 1>
  %vec.1 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 2, i32 3>
  %vec.2 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 4, i32 5>
  %vec.3 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 6, i32 7>
  store volatile <2 x double> %vec.0, <2 x double> addrspace(1)* undef
  store volatile <2 x double> %vec.1, <2 x double> addrspace(1)* undef
  store volatile <2 x double> %vec.2, <2 x double> addrspace(1)* undef
  store volatile <2 x double> %vec.3, <2 x double> addrspace(1)* undef
  ret void
}
| |
; Dynamic insertelement into a VGPR <8 x double> with SGPR value and SGPR
; index, per the _v_s_s suffix. This is the cheapest case in the checks
; below: no vector copy is needed, just the index scaled by 2 and two
; indexed 32-bit VGPR writes (s_set_gpr_idx_on/off on gfx900,
; m0 + v_movreld_b32 on gfx1010) for the two halves of the double.
define amdgpu_ps void @dyn_insertelement_v8f64_v_s_s(<8 x double> %vec, double inreg %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8f64_v_s_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_lshl_b32 s0, s4, 1
; GPRIDX-NEXT: s_set_gpr_idx_on s0, gpr_idx(DST)
; GPRIDX-NEXT: v_mov_b32_e32 v0, s2
; GPRIDX-NEXT: v_mov_b32_e32 v1, s3
; GPRIDX-NEXT: s_set_gpr_idx_off
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[0:3], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[12:15], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: s_endpgm
;
; MOVREL-LABEL: dyn_insertelement_v8f64_v_s_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_lshl_b32 m0, s4, 1
; MOVREL-NEXT: v_movreld_b32_e32 v0, s2
; MOVREL-NEXT: v_movreld_b32_e32 v1, s3
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[0:3], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[8:11], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[12:15], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: s_endpgm
entry:
  %insert = insertelement <8 x double> %vec, double %val, i32 %idx
  %vec.0 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 0, i32 1>
  %vec.1 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 2, i32 3>
  %vec.2 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 4, i32 5>
  %vec.3 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 6, i32 7>
  store volatile <2 x double> %vec.0, <2 x double> addrspace(1)* undef
  store volatile <2 x double> %vec.1, <2 x double> addrspace(1)* undef
  store volatile <2 x double> %vec.2, <2 x double> addrspace(1)* undef
  store volatile <2 x double> %vec.3, <2 x double> addrspace(1)* undef
  ret void
}
| |
; Dynamic insertelement into <8 x double> with SGPR vector but VGPR value
; AND VGPR (divergent) index, per the _s_v_v suffix. As in the other
; divergent-index cases, the autogenerated checks below show the vector
; copied into VGPRs and the insert expanded to a per-element
; v_cmp_eq_u32 + v_cndmask chain on both subtargets, then stored volatile
; as four <2 x double> pieces.
define amdgpu_ps void @dyn_insertelement_v8f64_s_v_v(<8 x double> inreg %vec, double %val, i32 %idx) {
; GPRIDX-LABEL: dyn_insertelement_v8f64_s_v_v:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_mov_b32 s1, s3
; GPRIDX-NEXT: s_mov_b32 s3, s5
; GPRIDX-NEXT: s_mov_b32 s5, s7
; GPRIDX-NEXT: s_mov_b32 s7, s9
; GPRIDX-NEXT: s_mov_b32 s9, s11
; GPRIDX-NEXT: s_mov_b32 s11, s13
; GPRIDX-NEXT: s_mov_b32 s13, s15
; GPRIDX-NEXT: s_mov_b32 s15, s17
; GPRIDX-NEXT: s_mov_b32 s0, s2
; GPRIDX-NEXT: s_mov_b32 s2, s4
; GPRIDX-NEXT: s_mov_b32 s4, s6
; GPRIDX-NEXT: s_mov_b32 s6, s8
; GPRIDX-NEXT: s_mov_b32 s8, s10
; GPRIDX-NEXT: s_mov_b32 s10, s12
; GPRIDX-NEXT: s_mov_b32 s12, s14
; GPRIDX-NEXT: s_mov_b32 s14, s16
; GPRIDX-NEXT: v_mov_b32_e32 v18, s15
; GPRIDX-NEXT: v_mov_b32_e32 v17, s14
; GPRIDX-NEXT: v_mov_b32_e32 v16, s13
; GPRIDX-NEXT: v_mov_b32_e32 v15, s12
; GPRIDX-NEXT: v_mov_b32_e32 v14, s11
; GPRIDX-NEXT: v_mov_b32_e32 v13, s10
; GPRIDX-NEXT: v_mov_b32_e32 v12, s9
; GPRIDX-NEXT: v_mov_b32_e32 v11, s8
; GPRIDX-NEXT: v_mov_b32_e32 v10, s7
; GPRIDX-NEXT: v_mov_b32_e32 v9, s6
; GPRIDX-NEXT: v_mov_b32_e32 v8, s5
; GPRIDX-NEXT: v_mov_b32_e32 v7, s4
; GPRIDX-NEXT: v_mov_b32_e32 v6, s3
; GPRIDX-NEXT: v_mov_b32_e32 v5, s2
; GPRIDX-NEXT: v_mov_b32_e32 v4, s1
; GPRIDX-NEXT: v_mov_b32_e32 v3, s0
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[12:13], 0, v2
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v2
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v2
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], 4, v2
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], 5, v2
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[8:9], 6, v2
; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[10:11], 7, v2
; GPRIDX-NEXT: v_cndmask_b32_e64 v3, v3, v0, s[12:13]
; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v0, vcc
; GPRIDX-NEXT: v_cndmask_b32_e64 v4, v4, v1, s[12:13]
; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v1, vcc
; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v7, v0, s[0:1]
; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v9, v0, s[2:3]
; GPRIDX-NEXT: v_cndmask_b32_e64 v11, v11, v0, s[4:5]
; GPRIDX-NEXT: v_cndmask_b32_e64 v13, v13, v0, s[6:7]
; GPRIDX-NEXT: v_cndmask_b32_e64 v15, v15, v0, s[8:9]
; GPRIDX-NEXT: v_cndmask_b32_e64 v17, v17, v0, s[10:11]
; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v1, s[0:1]
; GPRIDX-NEXT: v_cndmask_b32_e64 v10, v10, v1, s[2:3]
; GPRIDX-NEXT: v_cndmask_b32_e64 v12, v12, v1, s[4:5]
; GPRIDX-NEXT: v_cndmask_b32_e64 v14, v14, v1, s[6:7]
; GPRIDX-NEXT: v_cndmask_b32_e64 v16, v16, v1, s[8:9]
; GPRIDX-NEXT: v_cndmask_b32_e64 v18, v18, v1, s[10:11]
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[7:10], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[11:14], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[15:18], off
; GPRIDX-NEXT: s_waitcnt vmcnt(0)
; GPRIDX-NEXT: s_endpgm
;
; MOVREL-LABEL: dyn_insertelement_v8f64_s_v_v:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_mov_b32 s1, s3
; MOVREL-NEXT: s_mov_b32 s3, s5
; MOVREL-NEXT: s_mov_b32 s5, s7
; MOVREL-NEXT: s_mov_b32 s7, s9
; MOVREL-NEXT: s_mov_b32 s9, s11
; MOVREL-NEXT: s_mov_b32 s11, s13
; MOVREL-NEXT: s_mov_b32 s13, s15
; MOVREL-NEXT: s_mov_b32 s15, s17
; MOVREL-NEXT: s_mov_b32 s0, s2
; MOVREL-NEXT: s_mov_b32 s2, s4
; MOVREL-NEXT: s_mov_b32 s4, s6
; MOVREL-NEXT: s_mov_b32 s6, s8
; MOVREL-NEXT: s_mov_b32 s8, s10
; MOVREL-NEXT: s_mov_b32 s10, s12
; MOVREL-NEXT: s_mov_b32 s12, s14
; MOVREL-NEXT: s_mov_b32 s14, s16
; MOVREL-NEXT: v_mov_b32_e32 v18, s15
; MOVREL-NEXT: v_mov_b32_e32 v17, s14
; MOVREL-NEXT: v_mov_b32_e32 v16, s13
; MOVREL-NEXT: v_mov_b32_e32 v15, s12
; MOVREL-NEXT: v_mov_b32_e32 v14, s11
; MOVREL-NEXT: v_mov_b32_e32 v13, s10
; MOVREL-NEXT: v_mov_b32_e32 v12, s9
; MOVREL-NEXT: v_mov_b32_e32 v11, s8
; MOVREL-NEXT: v_mov_b32_e32 v10, s7
; MOVREL-NEXT: v_mov_b32_e32 v9, s6
; MOVREL-NEXT: v_mov_b32_e32 v8, s5
; MOVREL-NEXT: v_mov_b32_e32 v7, s4
; MOVREL-NEXT: v_mov_b32_e32 v6, s3
; MOVREL-NEXT: v_mov_b32_e32 v5, s2
; MOVREL-NEXT: v_mov_b32_e32 v4, s1
; MOVREL-NEXT: v_mov_b32_e32 v3, s0
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, 1, v2
; MOVREL-NEXT: v_cmp_eq_u32_e64 s1, 3, v2
; MOVREL-NEXT: v_cmp_eq_u32_e64 s6, 2, v2
; MOVREL-NEXT: v_cmp_eq_u32_e64 s2, 4, v2
; MOVREL-NEXT: v_cmp_eq_u32_e64 s3, 5, v2
; MOVREL-NEXT: v_cmp_eq_u32_e64 s4, 6, v2
; MOVREL-NEXT: v_cmp_eq_u32_e64 s5, 7, v2
; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc_lo
; MOVREL-NEXT: v_cndmask_b32_e64 v5, v5, v0, s0
; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v1, vcc_lo
; MOVREL-NEXT: v_cndmask_b32_e64 v6, v6, v1, s0
; MOVREL-NEXT: v_cndmask_b32_e64 v7, v7, v0, s6
; MOVREL-NEXT: v_cndmask_b32_e64 v9, v9, v0, s1
; MOVREL-NEXT: v_cndmask_b32_e64 v8, v8, v1, s6
; MOVREL-NEXT: v_cndmask_b32_e64 v10, v10, v1, s1
; MOVREL-NEXT: v_cndmask_b32_e64 v11, v11, v0, s2
; MOVREL-NEXT: v_cndmask_b32_e64 v13, v13, v0, s3
; MOVREL-NEXT: v_cndmask_b32_e64 v12, v12, v1, s2
; MOVREL-NEXT: v_cndmask_b32_e64 v14, v14, v1, s3
; MOVREL-NEXT: v_cndmask_b32_e64 v15, v15, v0, s4
; MOVREL-NEXT: v_cndmask_b32_e64 v17, v17, v0, s5
; MOVREL-NEXT: v_cndmask_b32_e64 v16, v16, v1, s4
; MOVREL-NEXT: v_cndmask_b32_e64 v18, v18, v1, s5
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[3:6], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[7:10], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[11:14], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[15:18], off
; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0
; MOVREL-NEXT: s_endpgm
entry:
  %insert = insertelement <8 x double> %vec, double %val, i32 %idx
  %vec.0 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 0, i32 1>
  %vec.1 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 2, i32 3>
  %vec.2 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 4, i32 5>
  %vec.3 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 6, i32 7>
  store volatile <2 x double> %vec.0, <2 x double> addrspace(1)* undef
  store volatile <2 x double> %vec.1, <2 x double> addrspace(1)* undef
  store volatile <2 x double> %vec.2, <2 x double> addrspace(1)* undef
  store volatile <2 x double> %vec.3, <2 x double> addrspace(1)* undef
  ret void
}
| |
| define amdgpu_ps void @dyn_insertelement_v8f64_v_s_v(<8 x double> %vec, double inreg %val, i32 %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v8f64_v_s_v: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: v_mov_b32_e32 v17, s2 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v16 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v16 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[14:15], 2, v16 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], 3, v16 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], 4, v16 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[8:9], 5, v16 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[10:11], 7, v16 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[12:13], 6, v16 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v16, s3 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v17, vcc |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v2, v2, v17, s[0:1] |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v16, vcc |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v3, v3, v16, s[0:1] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v4, v4, v17, s[14:15] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v6, v6, v17, s[4:5] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v17, s[6:7] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v10, v10, v17, s[8:9] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v12, v12, v17, s[12:13] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v14, v14, v17, s[10:11] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v5, v5, v16, s[14:15] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v7, v16, s[4:5] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v9, v16, s[6:7] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v11, v11, v16, s[8:9] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v13, v13, v16, s[12:13] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v15, v15, v16, s[10:11] |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[0:3], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[4:7], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[8:11], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[12:15], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: s_endpgm |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v8f64_v_s_v: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v16 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v0, v0, s2, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v1, v1, s3, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v16 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v2, v2, s2, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v3, v3, s3, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v16 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v4, v4, s2, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v5, v5, s3, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v16 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v6, v6, s2, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v7, v7, s3, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v16 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v8, v8, s2, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v9, v9, s3, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v16 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v10, v10, s2, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v11, v11, s3, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v16 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v12, v12, s2, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v13, v13, s3, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v16 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v14, v14, s2, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v15, v15, s3, vcc_lo |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[0:3], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[4:7], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[8:11], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[12:15], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: s_endpgm |
| entry: |
| %insert = insertelement <8 x double> %vec, double %val, i32 %idx |
| %vec.0 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 0, i32 1> |
| %vec.1 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 2, i32 3> |
| %vec.2 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 4, i32 5> |
| %vec.3 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 6, i32 7> |
| store volatile <2 x double> %vec.0, <2 x double> addrspace(1)* undef |
| store volatile <2 x double> %vec.1, <2 x double> addrspace(1)* undef |
| store volatile <2 x double> %vec.2, <2 x double> addrspace(1)* undef |
| store volatile <2 x double> %vec.3, <2 x double> addrspace(1)* undef |
| ret void |
| } |
| |
| ; Insert a VGPR double into a VGPR <8 x double> at a uniform (SGPR) index. |
| ; GPRIDX (gfx900) uses s_set_gpr_idx_on/off indirect-destination mode; MOVREL |
| ; (gfx1010) writes m0 and uses v_movreld. The index is shifted left by 1 |
| ; because each double element spans two 32-bit VGPRs. |
| define amdgpu_ps void @dyn_insertelement_v8f64_v_v_s(<8 x double> %vec, double %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v8f64_v_v_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_lshl_b32 s0, s2, 1 |
| ; GPRIDX-NEXT: s_set_gpr_idx_on s0, gpr_idx(DST) |
| ; GPRIDX-NEXT: v_mov_b32_e32 v0, v16 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v1, v17 |
| ; GPRIDX-NEXT: s_set_gpr_idx_off |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[0:3], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[4:7], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[8:11], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[12:15], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: s_endpgm |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v8f64_v_v_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_lshl_b32 m0, s2, 1 |
| ; MOVREL-NEXT: v_movreld_b32_e32 v0, v16 |
| ; MOVREL-NEXT: v_movreld_b32_e32 v1, v17 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[0:3], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[4:7], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[8:11], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[12:15], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: s_endpgm |
| entry: |
|   %insert = insertelement <8 x double> %vec, double %val, i32 %idx |
|   %vec.0 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 0, i32 1> |
|   %vec.1 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 2, i32 3> |
|   %vec.2 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 4, i32 5> |
|   %vec.3 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 6, i32 7> |
|   ; Volatile stores to undef pointers keep the whole result vector live. |
|   store volatile <2 x double> %vec.0, <2 x double> addrspace(1)* undef |
|   store volatile <2 x double> %vec.1, <2 x double> addrspace(1)* undef |
|   store volatile <2 x double> %vec.2, <2 x double> addrspace(1)* undef |
|   store volatile <2 x double> %vec.3, <2 x double> addrspace(1)* undef |
|   ret void |
| } |
| |
| ; Insert a VGPR double into a VGPR <8 x double> at a divergent (VGPR) index. |
| ; No indirect-addressing mode applies to a divergent index, so both targets |
| ; lower to per-element v_cmp_eq_u32 + v_cndmask_b32 select chains (one compare |
| ; per element, two cndmasks per double element). |
| define amdgpu_ps void @dyn_insertelement_v8f64_v_v_v(<8 x double> %vec, double %val, i32 %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v8f64_v_v_v: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v18 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v18 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v16, vcc |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v2, v2, v16, s[0:1] |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[2:3], 2, v18 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], 3, v18 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], 4, v18 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[8:9], 5, v18 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[10:11], 7, v18 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[12:13], 6, v18 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v3, v3, v17, s[0:1] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v4, v4, v16, s[2:3] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v6, v6, v16, s[4:5] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v16, s[6:7] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v10, v10, v16, s[8:9] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v12, v12, v16, s[12:13] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v14, v14, v16, s[10:11] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v5, v5, v17, s[2:3] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v7, v17, s[4:5] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v9, v17, s[6:7] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v11, v11, v17, s[8:9] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v13, v13, v17, s[12:13] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v15, v15, v17, s[10:11] |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[0:3], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[4:7], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[8:11], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[12:15], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: s_endpgm |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v8f64_v_v_v: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v18 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, 1, v18 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s1, 2, v18 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s2, 3, v18 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s3, 4, v18 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s4, 5, v18 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s5, 7, v18 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s6, 6, v18 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v16, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v2, v2, v16, s0 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v3, v3, v17, s0 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v4, v4, v16, s1 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v6, v6, v16, s2 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v5, v5, v17, s1 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v7, v7, v17, s2 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v8, v8, v16, s3 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v10, v10, v16, s4 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v9, v9, v17, s3 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v11, v11, v17, s4 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v12, v12, v16, s6 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v14, v14, v16, s5 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v13, v13, v17, s6 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v15, v15, v17, s5 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[0:3], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[4:7], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[8:11], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[12:15], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: s_endpgm |
| entry: |
|   %insert = insertelement <8 x double> %vec, double %val, i32 %idx |
|   %vec.0 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 0, i32 1> |
|   %vec.1 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 2, i32 3> |
|   %vec.2 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 4, i32 5> |
|   %vec.3 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 6, i32 7> |
|   ; Volatile stores to undef pointers keep the whole result vector live. |
|   store volatile <2 x double> %vec.0, <2 x double> addrspace(1)* undef |
|   store volatile <2 x double> %vec.1, <2 x double> addrspace(1)* undef |
|   store volatile <2 x double> %vec.2, <2 x double> addrspace(1)* undef |
|   store volatile <2 x double> %vec.3, <2 x double> addrspace(1)* undef |
|   ret void |
| } |
| |
| ; All-SGPR insert into a non-power-of-2 vector (<3 x i32>): lowered as a |
| ; chain of s_cmp_eq_u32 / s_cselect_b32, identical on both targets. |
| define amdgpu_ps <3 x i32> @dyn_insertelement_v3i32_s_s_s(<3 x i32> inreg %vec, i32 inreg %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v3i32_s_s_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s6, 0 |
| ; GPRIDX-NEXT: s_cselect_b32 s0, s5, s2 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s6, 1 |
| ; GPRIDX-NEXT: s_cselect_b32 s1, s5, s3 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s6, 2 |
| ; GPRIDX-NEXT: s_cselect_b32 s2, s5, s4 |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v3i32_s_s_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_cmp_eq_u32 s6, 0 |
| ; MOVREL-NEXT: s_cselect_b32 s0, s5, s2 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s6, 1 |
| ; MOVREL-NEXT: s_cselect_b32 s1, s5, s3 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s6, 2 |
| ; MOVREL-NEXT: s_cselect_b32 s2, s5, s4 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
|   %insert = insertelement <3 x i32> %vec, i32 %val, i32 %idx |
|   ret <3 x i32> %insert |
| } |
| |
| ; VGPR insert into <3 x float> at a uniform index: the odd element count has |
| ; no movrel/gpr_idx form here, so both targets emit per-element |
| ; v_cmp_eq_u32 / v_cndmask_b32 selects against the SGPR index. |
| define amdgpu_ps <3 x float> @dyn_insertelement_v3i32_v_v_s(<3 x float> %vec, float %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v3i32_v_v_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 0 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 1 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 2 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v3i32_v_v_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 0 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 1 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 2 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
|   %insert = insertelement <3 x float> %vec, float %val, i32 %idx |
|   ret <3 x float> %insert |
| } |
| |
| ; All-SGPR insert into a non-power-of-2 vector (<5 x i32>): lowered as a |
| ; chain of s_cmp_eq_u32 / s_cselect_b32, identical on both targets. |
| define amdgpu_ps <5 x i32> @dyn_insertelement_v5i32_s_s_s(<5 x i32> inreg %vec, i32 inreg %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v5i32_s_s_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s8, 0 |
| ; GPRIDX-NEXT: s_cselect_b32 s0, s7, s2 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s8, 1 |
| ; GPRIDX-NEXT: s_cselect_b32 s1, s7, s3 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s8, 2 |
| ; GPRIDX-NEXT: s_cselect_b32 s2, s7, s4 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s8, 3 |
| ; GPRIDX-NEXT: s_cselect_b32 s3, s7, s5 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s8, 4 |
| ; GPRIDX-NEXT: s_cselect_b32 s4, s7, s6 |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v5i32_s_s_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_cmp_eq_u32 s8, 0 |
| ; MOVREL-NEXT: s_cselect_b32 s0, s7, s2 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s8, 1 |
| ; MOVREL-NEXT: s_cselect_b32 s1, s7, s3 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s8, 2 |
| ; MOVREL-NEXT: s_cselect_b32 s2, s7, s4 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s8, 3 |
| ; MOVREL-NEXT: s_cselect_b32 s3, s7, s5 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s8, 4 |
| ; MOVREL-NEXT: s_cselect_b32 s4, s7, s6 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
|   %insert = insertelement <5 x i32> %vec, i32 %val, i32 %idx |
|   ret <5 x i32> %insert |
| } |
| |
| ; VGPR insert into <5 x float> at a uniform index: per-element |
| ; v_cmp_eq_u32 / v_cndmask_b32 selects against the SGPR index on both targets. |
| define amdgpu_ps <5 x float> @dyn_insertelement_v5i32_v_v_s(<5 x float> %vec, float %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v5i32_v_v_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 0 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 1 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 2 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 3 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 4 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v5i32_v_v_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 0 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 1 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 2 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 3 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 4 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
|   %insert = insertelement <5 x float> %vec, float %val, i32 %idx |
|   ret <5 x float> %insert |
| } |
| |
| ; All-SGPR insert into a large (<32 x i32>) vector: too big for a cselect |
| ; chain, so both targets copy the vector down to s0..s31, set m0 to the index |
| ; (s35) and use s_movreld_b32. The value to insert arrives in s34. gfx900 |
| ; needs an s_nop after writing m0 before s_movreld. |
| define amdgpu_ps <32 x i32> @dyn_insertelement_v32i32_s_s_s(<32 x i32> inreg %vec, i32 inreg %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v32i32_s_s_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_mov_b32 s0, s2 |
| ; GPRIDX-NEXT: s_mov_b32 s1, s3 |
| ; GPRIDX-NEXT: s_mov_b32 s2, s4 |
| ; GPRIDX-NEXT: s_mov_b32 s3, s5 |
| ; GPRIDX-NEXT: s_mov_b32 s4, s6 |
| ; GPRIDX-NEXT: s_mov_b32 s5, s7 |
| ; GPRIDX-NEXT: s_mov_b32 s6, s8 |
| ; GPRIDX-NEXT: s_mov_b32 s7, s9 |
| ; GPRIDX-NEXT: s_mov_b32 s8, s10 |
| ; GPRIDX-NEXT: s_mov_b32 s9, s11 |
| ; GPRIDX-NEXT: s_mov_b32 s10, s12 |
| ; GPRIDX-NEXT: s_mov_b32 s11, s13 |
| ; GPRIDX-NEXT: s_mov_b32 s12, s14 |
| ; GPRIDX-NEXT: s_mov_b32 s13, s15 |
| ; GPRIDX-NEXT: s_mov_b32 s14, s16 |
| ; GPRIDX-NEXT: s_mov_b32 s15, s17 |
| ; GPRIDX-NEXT: s_mov_b32 s16, s18 |
| ; GPRIDX-NEXT: s_mov_b32 s17, s19 |
| ; GPRIDX-NEXT: s_mov_b32 s18, s20 |
| ; GPRIDX-NEXT: s_mov_b32 s19, s21 |
| ; GPRIDX-NEXT: s_mov_b32 s20, s22 |
| ; GPRIDX-NEXT: s_mov_b32 s21, s23 |
| ; GPRIDX-NEXT: s_mov_b32 s22, s24 |
| ; GPRIDX-NEXT: s_mov_b32 s23, s25 |
| ; GPRIDX-NEXT: s_mov_b32 s24, s26 |
| ; GPRIDX-NEXT: s_mov_b32 s25, s27 |
| ; GPRIDX-NEXT: s_mov_b32 s26, s28 |
| ; GPRIDX-NEXT: s_mov_b32 s27, s29 |
| ; GPRIDX-NEXT: s_mov_b32 s28, s30 |
| ; GPRIDX-NEXT: s_mov_b32 s29, s31 |
| ; GPRIDX-NEXT: s_mov_b32 s31, s33 |
| ; GPRIDX-NEXT: s_mov_b32 s30, s32 |
| ; GPRIDX-NEXT: s_mov_b32 m0, s35 |
| ; GPRIDX-NEXT: s_nop 0 |
| ; GPRIDX-NEXT: s_movreld_b32 s0, s34 |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v32i32_s_s_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_mov_b32 s0, s2 |
| ; MOVREL-NEXT: s_mov_b32 m0, s35 |
| ; MOVREL-NEXT: s_mov_b32 s1, s3 |
| ; MOVREL-NEXT: s_mov_b32 s2, s4 |
| ; MOVREL-NEXT: s_mov_b32 s3, s5 |
| ; MOVREL-NEXT: s_mov_b32 s4, s6 |
| ; MOVREL-NEXT: s_mov_b32 s5, s7 |
| ; MOVREL-NEXT: s_mov_b32 s6, s8 |
| ; MOVREL-NEXT: s_mov_b32 s7, s9 |
| ; MOVREL-NEXT: s_mov_b32 s8, s10 |
| ; MOVREL-NEXT: s_mov_b32 s9, s11 |
| ; MOVREL-NEXT: s_mov_b32 s10, s12 |
| ; MOVREL-NEXT: s_mov_b32 s11, s13 |
| ; MOVREL-NEXT: s_mov_b32 s12, s14 |
| ; MOVREL-NEXT: s_mov_b32 s13, s15 |
| ; MOVREL-NEXT: s_mov_b32 s14, s16 |
| ; MOVREL-NEXT: s_mov_b32 s15, s17 |
| ; MOVREL-NEXT: s_mov_b32 s16, s18 |
| ; MOVREL-NEXT: s_mov_b32 s17, s19 |
| ; MOVREL-NEXT: s_mov_b32 s18, s20 |
| ; MOVREL-NEXT: s_mov_b32 s19, s21 |
| ; MOVREL-NEXT: s_mov_b32 s20, s22 |
| ; MOVREL-NEXT: s_mov_b32 s21, s23 |
| ; MOVREL-NEXT: s_mov_b32 s22, s24 |
| ; MOVREL-NEXT: s_mov_b32 s23, s25 |
| ; MOVREL-NEXT: s_mov_b32 s24, s26 |
| ; MOVREL-NEXT: s_mov_b32 s25, s27 |
| ; MOVREL-NEXT: s_mov_b32 s26, s28 |
| ; MOVREL-NEXT: s_mov_b32 s27, s29 |
| ; MOVREL-NEXT: s_mov_b32 s28, s30 |
| ; MOVREL-NEXT: s_mov_b32 s29, s31 |
| ; MOVREL-NEXT: s_mov_b32 s31, s33 |
| ; MOVREL-NEXT: s_mov_b32 s30, s32 |
| ; MOVREL-NEXT: s_movreld_b32 s0, s34 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
|   %insert = insertelement <32 x i32> %vec, i32 %val, i32 %idx |
|   ret <32 x i32> %insert |
| } |
| |
| ; VGPR insert into <32 x float> at a uniform index: the vector is already in |
| ; v0..v31, so this is a single indirect write — s_set_gpr_idx_on (gfx900) or |
| ; m0 + v_movreld (gfx1010) — with the value in v32. |
| define amdgpu_ps <32 x float> @dyn_insertelement_v32i32_v_v_s(<32 x float> %vec, float %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v32i32_v_v_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_set_gpr_idx_on s2, gpr_idx(DST) |
| ; GPRIDX-NEXT: v_mov_b32_e32 v0, v32 |
| ; GPRIDX-NEXT: s_set_gpr_idx_off |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v32i32_v_v_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_mov_b32 m0, s2 |
| ; MOVREL-NEXT: v_movreld_b32_e32 v0, v32 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
|   %insert = insertelement <32 x float> %vec, float %val, i32 %idx |
|   ret <32 x float> %insert |
| } |
| |
| ; All-SGPR insert with an index offset by a constant (+1): the add is kept as |
| ; a separate s_add_i32 and the cselect chain compares the adjusted index. |
| ; Result is returned in VGPRs, hence the trailing v_mov copies. |
| define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_s_s_s_add_1(<8 x float> inreg %vec, float inreg %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v8f32_s_s_s_add_1: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_add_i32 s11, s11, 1 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 0 |
| ; GPRIDX-NEXT: s_cselect_b32 s0, s10, s2 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 1 |
| ; GPRIDX-NEXT: s_cselect_b32 s1, s10, s3 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 2 |
| ; GPRIDX-NEXT: s_cselect_b32 s2, s10, s4 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 3 |
| ; GPRIDX-NEXT: s_cselect_b32 s3, s10, s5 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 4 |
| ; GPRIDX-NEXT: s_cselect_b32 s4, s10, s6 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 5 |
| ; GPRIDX-NEXT: s_cselect_b32 s5, s10, s7 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 6 |
| ; GPRIDX-NEXT: s_cselect_b32 s6, s10, s8 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 7 |
| ; GPRIDX-NEXT: s_cselect_b32 s7, s10, s9 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v0, s0 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v1, s1 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v2, s2 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v3, s3 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v4, s4 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v5, s5 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v6, s6 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v7, s7 |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v8f32_s_s_s_add_1: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_add_i32 s11, s11, 1 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 0 |
| ; MOVREL-NEXT: s_cselect_b32 s0, s10, s2 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 1 |
| ; MOVREL-NEXT: v_mov_b32_e32 v0, s0 |
| ; MOVREL-NEXT: s_cselect_b32 s1, s10, s3 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 2 |
| ; MOVREL-NEXT: v_mov_b32_e32 v1, s1 |
| ; MOVREL-NEXT: s_cselect_b32 s2, s10, s4 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 3 |
| ; MOVREL-NEXT: v_mov_b32_e32 v2, s2 |
| ; MOVREL-NEXT: s_cselect_b32 s3, s10, s5 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 4 |
| ; MOVREL-NEXT: v_mov_b32_e32 v3, s3 |
| ; MOVREL-NEXT: s_cselect_b32 s4, s10, s6 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 5 |
| ; MOVREL-NEXT: v_mov_b32_e32 v4, s4 |
| ; MOVREL-NEXT: s_cselect_b32 s5, s10, s7 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 6 |
| ; MOVREL-NEXT: v_mov_b32_e32 v5, s5 |
| ; MOVREL-NEXT: s_cselect_b32 s6, s10, s8 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 7 |
| ; MOVREL-NEXT: v_mov_b32_e32 v6, s6 |
| ; MOVREL-NEXT: s_cselect_b32 s7, s10, s9 |
| ; MOVREL-NEXT: v_mov_b32_e32 v7, s7 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
|   %idx.add = add i32 %idx, 1 |
|   %insert = insertelement <8 x float> %vec, float %val, i32 %idx.add |
|   ret <8 x float> %insert |
| } |
| |
| ; Same as the add_1 variant but with a +7 offset; checks the constant offset |
| ; is folded the same way regardless of magnitude. |
| define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_s_s_s_add_7(<8 x float> inreg %vec, float inreg %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v8f32_s_s_s_add_7: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_add_i32 s11, s11, 7 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 0 |
| ; GPRIDX-NEXT: s_cselect_b32 s0, s10, s2 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 1 |
| ; GPRIDX-NEXT: s_cselect_b32 s1, s10, s3 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 2 |
| ; GPRIDX-NEXT: s_cselect_b32 s2, s10, s4 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 3 |
| ; GPRIDX-NEXT: s_cselect_b32 s3, s10, s5 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 4 |
| ; GPRIDX-NEXT: s_cselect_b32 s4, s10, s6 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 5 |
| ; GPRIDX-NEXT: s_cselect_b32 s5, s10, s7 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 6 |
| ; GPRIDX-NEXT: s_cselect_b32 s6, s10, s8 |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s11, 7 |
| ; GPRIDX-NEXT: s_cselect_b32 s7, s10, s9 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v0, s0 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v1, s1 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v2, s2 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v3, s3 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v4, s4 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v5, s5 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v6, s6 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v7, s7 |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v8f32_s_s_s_add_7: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_add_i32 s11, s11, 7 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 0 |
| ; MOVREL-NEXT: s_cselect_b32 s0, s10, s2 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 1 |
| ; MOVREL-NEXT: v_mov_b32_e32 v0, s0 |
| ; MOVREL-NEXT: s_cselect_b32 s1, s10, s3 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 2 |
| ; MOVREL-NEXT: v_mov_b32_e32 v1, s1 |
| ; MOVREL-NEXT: s_cselect_b32 s2, s10, s4 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 3 |
| ; MOVREL-NEXT: v_mov_b32_e32 v2, s2 |
| ; MOVREL-NEXT: s_cselect_b32 s3, s10, s5 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 4 |
| ; MOVREL-NEXT: v_mov_b32_e32 v3, s3 |
| ; MOVREL-NEXT: s_cselect_b32 s4, s10, s6 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 5 |
| ; MOVREL-NEXT: v_mov_b32_e32 v4, s4 |
| ; MOVREL-NEXT: s_cselect_b32 s5, s10, s7 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 6 |
| ; MOVREL-NEXT: v_mov_b32_e32 v5, s5 |
| ; MOVREL-NEXT: s_cselect_b32 s6, s10, s8 |
| ; MOVREL-NEXT: s_cmp_eq_u32 s11, 7 |
| ; MOVREL-NEXT: v_mov_b32_e32 v6, s6 |
| ; MOVREL-NEXT: s_cselect_b32 s7, s10, s9 |
| ; MOVREL-NEXT: v_mov_b32_e32 v7, s7 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
|   %idx.add = add i32 %idx, 7 |
|   %insert = insertelement <8 x float> %vec, float %val, i32 %idx.add |
|   ret <8 x float> %insert |
| } |
| |
| ; Divergent-index insert with a +1 offset: the add is materialized (v_add) |
| ; and the adjusted index feeds a per-element compare/cndmask chain. |
| define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_v_v_v_add_1(<8 x float> %vec, float %val, i32 %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v8f32_v_v_v_add_1: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: v_add_u32_e32 v9, 1, v9 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v8f32_v_v_v_add_1: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: v_add_nc_u32_e32 v9, 1, v9 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc_lo |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
|   %idx.add = add i32 %idx, 1 |
|   %insert = insertelement <8 x float> %vec, float %val, i32 %idx.add |
|   ret <8 x float> %insert |
| } |
| |
| ; Divergent-index insert with a +7 offset; same lowering shape as the add_1 |
| ; variant, only the added constant differs. |
| define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_v_v_v_add_7(<8 x float> %vec, float %val, i32 %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v8f32_v_v_v_add_7: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: v_add_u32_e32 v9, 7, v9 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v9 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v8f32_v_v_v_add_7: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: v_add_nc_u32_e32 v9, 7, v9 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v9 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc_lo |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
|   %idx.add = add i32 %idx, 7 |
|   %insert = insertelement <8 x float> %vec, float %val, i32 %idx.add |
|   ret <8 x float> %insert |
| } |
| |
| ; All-SGPR 64-bit insert with a +1 index offset: the vector is shuffled down |
| ; to s0..s15, m0 takes the index (s20), and a single s_movreld_b64 writes the |
| ; double pair (s[18:19]). The constant offset is folded into the addressing |
| ; rather than emitted as a separate add. |
| define amdgpu_ps void @dyn_insertelement_v8f64_s_s_s_add_1(<8 x double> inreg %vec, double inreg %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v8f64_s_s_s_add_1: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_mov_b32 s0, s2 |
| ; GPRIDX-NEXT: s_mov_b32 s1, s3 |
| ; GPRIDX-NEXT: s_mov_b32 s2, s4 |
| ; GPRIDX-NEXT: s_mov_b32 s3, s5 |
| ; GPRIDX-NEXT: s_mov_b32 s4, s6 |
| ; GPRIDX-NEXT: s_mov_b32 s5, s7 |
| ; GPRIDX-NEXT: s_mov_b32 s6, s8 |
| ; GPRIDX-NEXT: s_mov_b32 s7, s9 |
| ; GPRIDX-NEXT: s_mov_b32 s8, s10 |
| ; GPRIDX-NEXT: s_mov_b32 s9, s11 |
| ; GPRIDX-NEXT: s_mov_b32 s10, s12 |
| ; GPRIDX-NEXT: s_mov_b32 s11, s13 |
| ; GPRIDX-NEXT: s_mov_b32 s12, s14 |
| ; GPRIDX-NEXT: s_mov_b32 s13, s15 |
| ; GPRIDX-NEXT: s_mov_b32 s14, s16 |
| ; GPRIDX-NEXT: s_mov_b32 s15, s17 |
| ; GPRIDX-NEXT: s_mov_b32 m0, s20 |
| ; GPRIDX-NEXT: s_nop 0 |
| ; GPRIDX-NEXT: s_movreld_b64 s[2:3], s[18:19] |
| ; GPRIDX-NEXT: v_mov_b32_e32 v0, s0 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v1, s1 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v2, s2 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v3, s3 |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[0:3], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: v_mov_b32_e32 v0, s4 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v1, s5 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v2, s6 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v3, s7 |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[0:3], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: v_mov_b32_e32 v0, s8 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v1, s9 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v2, s10 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v3, s11 |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[0:3], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: v_mov_b32_e32 v0, s12 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v1, s13 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v2, s14 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v3, s15 |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[0:3], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: s_endpgm |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v8f64_s_s_s_add_1: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_mov_b32 s0, s2 |
| ; MOVREL-NEXT: s_mov_b32 s1, s3 |
| ; MOVREL-NEXT: s_mov_b32 s2, s4 |
| ; MOVREL-NEXT: s_mov_b32 s3, s5 |
| ; MOVREL-NEXT: s_mov_b32 m0, s20 |
| ; MOVREL-NEXT: s_mov_b32 s4, s6 |
| ; MOVREL-NEXT: s_mov_b32 s5, s7 |
| ; MOVREL-NEXT: s_mov_b32 s6, s8 |
| ; MOVREL-NEXT: s_mov_b32 s7, s9 |
| ; MOVREL-NEXT: s_mov_b32 s8, s10 |
| ; MOVREL-NEXT: s_mov_b32 s9, s11 |
| ; MOVREL-NEXT: s_mov_b32 s10, s12 |
| ; MOVREL-NEXT: s_mov_b32 s11, s13 |
| ; MOVREL-NEXT: s_mov_b32 s12, s14 |
| ; MOVREL-NEXT: s_mov_b32 s13, s15 |
| ; MOVREL-NEXT: s_mov_b32 s14, s16 |
| ; MOVREL-NEXT: s_mov_b32 s15, s17 |
| ; MOVREL-NEXT: s_movreld_b64 s[2:3], s[18:19] |
| ; MOVREL-NEXT: v_mov_b32_e32 v0, s0 |
| ; MOVREL-NEXT: v_mov_b32_e32 v1, s1 |
| ; MOVREL-NEXT: v_mov_b32_e32 v2, s2 |
| ; MOVREL-NEXT: v_mov_b32_e32 v3, s3 |
| ; MOVREL-NEXT: v_mov_b32_e32 v4, s4 |
| ; MOVREL-NEXT: v_mov_b32_e32 v5, s5 |
| ; MOVREL-NEXT: v_mov_b32_e32 v6, s6 |
| ; MOVREL-NEXT: v_mov_b32_e32 v7, s7 |
| ; MOVREL-NEXT: v_mov_b32_e32 v8, s8 |
| ; MOVREL-NEXT: v_mov_b32_e32 v9, s9 |
| ; MOVREL-NEXT: v_mov_b32_e32 v10, s10 |
| ; MOVREL-NEXT: v_mov_b32_e32 v11, s11 |
| ; MOVREL-NEXT: v_mov_b32_e32 v12, s12 |
| ; MOVREL-NEXT: v_mov_b32_e32 v13, s13 |
| ; MOVREL-NEXT: v_mov_b32_e32 v14, s14 |
| ; MOVREL-NEXT: v_mov_b32_e32 v15, s15 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[0:3], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[4:7], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[8:11], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[12:15], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: s_endpgm |
| entry: |
|   %idx.add = add i32 %idx, 1 |
|   %insert = insertelement <8 x double> %vec, double %val, i32 %idx.add |
|   %vec.0 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 0, i32 1> |
|   %vec.1 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 2, i32 3> |
|   %vec.2 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 4, i32 5> |
|   %vec.3 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 6, i32 7> |
|   ; Volatile stores to undef pointers keep the whole result vector live. |
|   store volatile <2 x double> %vec.0, <2 x double> addrspace(1)* undef |
|   store volatile <2 x double> %vec.1, <2 x double> addrspace(1)* undef |
|   store volatile <2 x double> %vec.2, <2 x double> addrspace(1)* undef |
|   store volatile <2 x double> %vec.3, <2 x double> addrspace(1)* undef |
|   ret void |
| } |
| |
| ; Fully divergent insert: both the inserted value and the index live in VGPRs, |
| ; so (per the generated checks below) the insert is lowered to a chain of |
| ; v_cmp_eq / v_cndmask pairs per element instead of movrel or gpr_idx mode. |
| ; The +1 on the index is folded into the compared constants' input (v18). |
| define amdgpu_ps void @dyn_insertelement_v8f64_v_v_v_add_1(<8 x double> %vec, double %val, i32 %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v8f64_v_v_v_add_1: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: v_add_u32_e32 v18, 1, v18 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v18 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v18 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v16, vcc |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v2, v2, v16, s[0:1] |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[2:3], 2, v18 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], 3, v18 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], 4, v18 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[8:9], 5, v18 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[10:11], 7, v18 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[12:13], 6, v18 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v3, v3, v17, s[0:1] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v4, v4, v16, s[2:3] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v6, v6, v16, s[4:5] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v16, s[6:7] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v10, v10, v16, s[8:9] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v12, v12, v16, s[12:13] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v14, v14, v16, s[10:11] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v5, v5, v17, s[2:3] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v7, v17, s[4:5] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v9, v17, s[6:7] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v11, v11, v17, s[8:9] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v13, v13, v17, s[12:13] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v15, v15, v17, s[10:11] |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[0:3], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[4:7], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[8:11], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[12:15], off |
| ; GPRIDX-NEXT: s_waitcnt vmcnt(0) |
| ; GPRIDX-NEXT: s_endpgm |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v8f64_v_v_v_add_1: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: v_add_nc_u32_e32 v18, 1, v18 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v18 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, 1, v18 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s1, 2, v18 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s2, 3, v18 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s3, 4, v18 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s4, 5, v18 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s5, 7, v18 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s6, 6, v18 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v16, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v2, v2, v16, s0 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v3, v3, v17, s0 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v4, v4, v16, s1 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v6, v6, v16, s2 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v5, v5, v17, s1 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v7, v7, v17, s2 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v8, v8, v16, s3 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v10, v10, v16, s4 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v9, v9, v17, s3 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v11, v11, v17, s4 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v12, v12, v16, s6 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v14, v14, v16, s5 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v13, v13, v17, s6 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v15, v15, v17, s5 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[0:3], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[4:7], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[8:11], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: global_store_dwordx4 v[0:1], v[12:15], off |
| ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 |
| ; MOVREL-NEXT: s_endpgm |
| entry: |
| %idx.add = add i32 %idx, 1 |
| %insert = insertelement <8 x double> %vec, double %val, i32 %idx.add |
| %vec.0 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 0, i32 1> |
| %vec.1 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 2, i32 3> |
| %vec.2 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 4, i32 5> |
| %vec.3 = shufflevector <8 x double> %insert, <8 x double> undef, <2 x i32> <i32 6, i32 7> |
| store volatile <2 x double> %vec.0, <2 x double> addrspace(1)* undef |
| store volatile <2 x double> %vec.1, <2 x double> addrspace(1)* undef |
| store volatile <2 x double> %vec.2, <2 x double> addrspace(1)* undef |
| store volatile <2 x double> %vec.3, <2 x double> addrspace(1)* undef |
| ret void |
| } |
| |
| ; Uniform insert into <16 x i32>: vector, value, and index are all SGPRs, so |
| ; both targets select s_movreld_b32 with the index placed in m0. The s_mov |
| ; shuffle copies the inreg arguments down from s2.. into the returned s0..s15. |
| define amdgpu_ps <16 x i32> @dyn_insertelement_v16i32_s_s_s(<16 x i32> inreg %vec, i32 inreg %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v16i32_s_s_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_mov_b32 s0, s2 |
| ; GPRIDX-NEXT: s_mov_b32 s1, s3 |
| ; GPRIDX-NEXT: s_mov_b32 s2, s4 |
| ; GPRIDX-NEXT: s_mov_b32 s3, s5 |
| ; GPRIDX-NEXT: s_mov_b32 s4, s6 |
| ; GPRIDX-NEXT: s_mov_b32 s5, s7 |
| ; GPRIDX-NEXT: s_mov_b32 s6, s8 |
| ; GPRIDX-NEXT: s_mov_b32 s7, s9 |
| ; GPRIDX-NEXT: s_mov_b32 s8, s10 |
| ; GPRIDX-NEXT: s_mov_b32 s9, s11 |
| ; GPRIDX-NEXT: s_mov_b32 s10, s12 |
| ; GPRIDX-NEXT: s_mov_b32 s11, s13 |
| ; GPRIDX-NEXT: s_mov_b32 s12, s14 |
| ; GPRIDX-NEXT: s_mov_b32 s13, s15 |
| ; GPRIDX-NEXT: s_mov_b32 s14, s16 |
| ; GPRIDX-NEXT: s_mov_b32 s15, s17 |
| ; GPRIDX-NEXT: s_mov_b32 m0, s19 |
| ; GPRIDX-NEXT: s_nop 0 |
| ; GPRIDX-NEXT: s_movreld_b32 s0, s18 |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v16i32_s_s_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_mov_b32 s0, s2 |
| ; MOVREL-NEXT: s_mov_b32 m0, s19 |
| ; MOVREL-NEXT: s_mov_b32 s1, s3 |
| ; MOVREL-NEXT: s_mov_b32 s2, s4 |
| ; MOVREL-NEXT: s_mov_b32 s3, s5 |
| ; MOVREL-NEXT: s_mov_b32 s4, s6 |
| ; MOVREL-NEXT: s_mov_b32 s5, s7 |
| ; MOVREL-NEXT: s_mov_b32 s6, s8 |
| ; MOVREL-NEXT: s_mov_b32 s7, s9 |
| ; MOVREL-NEXT: s_mov_b32 s8, s10 |
| ; MOVREL-NEXT: s_mov_b32 s9, s11 |
| ; MOVREL-NEXT: s_mov_b32 s10, s12 |
| ; MOVREL-NEXT: s_mov_b32 s11, s13 |
| ; MOVREL-NEXT: s_mov_b32 s12, s14 |
| ; MOVREL-NEXT: s_mov_b32 s13, s15 |
| ; MOVREL-NEXT: s_mov_b32 s14, s16 |
| ; MOVREL-NEXT: s_mov_b32 s15, s17 |
| ; MOVREL-NEXT: s_movreld_b32 s0, s18 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <16 x i32> %vec, i32 %val, i32 %idx |
| ret <16 x i32> %insert |
| } |
| |
| ; Same uniform s_movreld_b32 lowering as the i32 variant, but the float return |
| ; is materialized into v0..v15 with v_mov_b32 copies after the scalar insert. |
| define amdgpu_ps <16 x float> @dyn_insertelement_v16f32_s_s_s(<16 x float> inreg %vec, float inreg %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v16f32_s_s_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_mov_b32 s0, s2 |
| ; GPRIDX-NEXT: s_mov_b32 s1, s3 |
| ; GPRIDX-NEXT: s_mov_b32 s2, s4 |
| ; GPRIDX-NEXT: s_mov_b32 s3, s5 |
| ; GPRIDX-NEXT: s_mov_b32 s4, s6 |
| ; GPRIDX-NEXT: s_mov_b32 s5, s7 |
| ; GPRIDX-NEXT: s_mov_b32 s6, s8 |
| ; GPRIDX-NEXT: s_mov_b32 s7, s9 |
| ; GPRIDX-NEXT: s_mov_b32 s8, s10 |
| ; GPRIDX-NEXT: s_mov_b32 s9, s11 |
| ; GPRIDX-NEXT: s_mov_b32 s10, s12 |
| ; GPRIDX-NEXT: s_mov_b32 s11, s13 |
| ; GPRIDX-NEXT: s_mov_b32 s12, s14 |
| ; GPRIDX-NEXT: s_mov_b32 s13, s15 |
| ; GPRIDX-NEXT: s_mov_b32 s14, s16 |
| ; GPRIDX-NEXT: s_mov_b32 s15, s17 |
| ; GPRIDX-NEXT: s_mov_b32 m0, s19 |
| ; GPRIDX-NEXT: s_nop 0 |
| ; GPRIDX-NEXT: s_movreld_b32 s0, s18 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v0, s0 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v1, s1 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v2, s2 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v3, s3 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v4, s4 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v5, s5 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v6, s6 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v7, s7 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v8, s8 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v9, s9 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v10, s10 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v11, s11 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v12, s12 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v13, s13 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v14, s14 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v15, s15 |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v16f32_s_s_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_mov_b32 s0, s2 |
| ; MOVREL-NEXT: s_mov_b32 m0, s19 |
| ; MOVREL-NEXT: s_mov_b32 s1, s3 |
| ; MOVREL-NEXT: s_mov_b32 s2, s4 |
| ; MOVREL-NEXT: s_mov_b32 s3, s5 |
| ; MOVREL-NEXT: s_mov_b32 s4, s6 |
| ; MOVREL-NEXT: s_mov_b32 s5, s7 |
| ; MOVREL-NEXT: s_mov_b32 s6, s8 |
| ; MOVREL-NEXT: s_mov_b32 s7, s9 |
| ; MOVREL-NEXT: s_mov_b32 s8, s10 |
| ; MOVREL-NEXT: s_mov_b32 s9, s11 |
| ; MOVREL-NEXT: s_mov_b32 s10, s12 |
| ; MOVREL-NEXT: s_mov_b32 s11, s13 |
| ; MOVREL-NEXT: s_mov_b32 s12, s14 |
| ; MOVREL-NEXT: s_mov_b32 s13, s15 |
| ; MOVREL-NEXT: s_mov_b32 s14, s16 |
| ; MOVREL-NEXT: s_mov_b32 s15, s17 |
| ; MOVREL-NEXT: s_movreld_b32 s0, s18 |
| ; MOVREL-NEXT: v_mov_b32_e32 v0, s0 |
| ; MOVREL-NEXT: v_mov_b32_e32 v1, s1 |
| ; MOVREL-NEXT: v_mov_b32_e32 v2, s2 |
| ; MOVREL-NEXT: v_mov_b32_e32 v3, s3 |
| ; MOVREL-NEXT: v_mov_b32_e32 v4, s4 |
| ; MOVREL-NEXT: v_mov_b32_e32 v5, s5 |
| ; MOVREL-NEXT: v_mov_b32_e32 v6, s6 |
| ; MOVREL-NEXT: v_mov_b32_e32 v7, s7 |
| ; MOVREL-NEXT: v_mov_b32_e32 v8, s8 |
| ; MOVREL-NEXT: v_mov_b32_e32 v9, s9 |
| ; MOVREL-NEXT: v_mov_b32_e32 v10, s10 |
| ; MOVREL-NEXT: v_mov_b32_e32 v11, s11 |
| ; MOVREL-NEXT: v_mov_b32_e32 v12, s12 |
| ; MOVREL-NEXT: v_mov_b32_e32 v13, s13 |
| ; MOVREL-NEXT: v_mov_b32_e32 v14, s14 |
| ; MOVREL-NEXT: v_mov_b32_e32 v15, s15 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <16 x float> %vec, float %val, i32 %idx |
| ret <16 x float> %insert |
| } |
| |
| ; Widest uniform case: a 32-element vector still selects a single |
| ; s_movreld_b32 (index in m0, taken from s35, value from s34). Exercises the |
| ; SGPR shuffle all the way up through s33, including the out-of-order |
| ; s31<-s33 / s30<-s32 copies, before fanning the result out to v0..v31. |
| define amdgpu_ps <32 x float> @dyn_insertelement_v32f32_s_s_s(<32 x float> inreg %vec, float inreg %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v32f32_s_s_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_mov_b32 s0, s2 |
| ; GPRIDX-NEXT: s_mov_b32 s1, s3 |
| ; GPRIDX-NEXT: s_mov_b32 s2, s4 |
| ; GPRIDX-NEXT: s_mov_b32 s3, s5 |
| ; GPRIDX-NEXT: s_mov_b32 s4, s6 |
| ; GPRIDX-NEXT: s_mov_b32 s5, s7 |
| ; GPRIDX-NEXT: s_mov_b32 s6, s8 |
| ; GPRIDX-NEXT: s_mov_b32 s7, s9 |
| ; GPRIDX-NEXT: s_mov_b32 s8, s10 |
| ; GPRIDX-NEXT: s_mov_b32 s9, s11 |
| ; GPRIDX-NEXT: s_mov_b32 s10, s12 |
| ; GPRIDX-NEXT: s_mov_b32 s11, s13 |
| ; GPRIDX-NEXT: s_mov_b32 s12, s14 |
| ; GPRIDX-NEXT: s_mov_b32 s13, s15 |
| ; GPRIDX-NEXT: s_mov_b32 s14, s16 |
| ; GPRIDX-NEXT: s_mov_b32 s15, s17 |
| ; GPRIDX-NEXT: s_mov_b32 s16, s18 |
| ; GPRIDX-NEXT: s_mov_b32 s17, s19 |
| ; GPRIDX-NEXT: s_mov_b32 s18, s20 |
| ; GPRIDX-NEXT: s_mov_b32 s19, s21 |
| ; GPRIDX-NEXT: s_mov_b32 s20, s22 |
| ; GPRIDX-NEXT: s_mov_b32 s21, s23 |
| ; GPRIDX-NEXT: s_mov_b32 s22, s24 |
| ; GPRIDX-NEXT: s_mov_b32 s23, s25 |
| ; GPRIDX-NEXT: s_mov_b32 s24, s26 |
| ; GPRIDX-NEXT: s_mov_b32 s25, s27 |
| ; GPRIDX-NEXT: s_mov_b32 s26, s28 |
| ; GPRIDX-NEXT: s_mov_b32 s27, s29 |
| ; GPRIDX-NEXT: s_mov_b32 s28, s30 |
| ; GPRIDX-NEXT: s_mov_b32 s29, s31 |
| ; GPRIDX-NEXT: s_mov_b32 s31, s33 |
| ; GPRIDX-NEXT: s_mov_b32 s30, s32 |
| ; GPRIDX-NEXT: s_mov_b32 m0, s35 |
| ; GPRIDX-NEXT: s_nop 0 |
| ; GPRIDX-NEXT: s_movreld_b32 s0, s34 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v0, s0 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v1, s1 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v2, s2 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v3, s3 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v4, s4 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v5, s5 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v6, s6 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v7, s7 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v8, s8 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v9, s9 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v10, s10 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v11, s11 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v12, s12 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v13, s13 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v14, s14 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v15, s15 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v16, s16 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v17, s17 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v18, s18 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v19, s19 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v20, s20 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v21, s21 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v22, s22 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v23, s23 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v24, s24 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v25, s25 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v26, s26 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v27, s27 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v28, s28 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v29, s29 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v30, s30 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v31, s31 |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v32f32_s_s_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_mov_b32 s0, s2 |
| ; MOVREL-NEXT: s_mov_b32 m0, s35 |
| ; MOVREL-NEXT: s_mov_b32 s1, s3 |
| ; MOVREL-NEXT: s_mov_b32 s2, s4 |
| ; MOVREL-NEXT: s_mov_b32 s3, s5 |
| ; MOVREL-NEXT: s_mov_b32 s4, s6 |
| ; MOVREL-NEXT: s_mov_b32 s5, s7 |
| ; MOVREL-NEXT: s_mov_b32 s6, s8 |
| ; MOVREL-NEXT: s_mov_b32 s7, s9 |
| ; MOVREL-NEXT: s_mov_b32 s8, s10 |
| ; MOVREL-NEXT: s_mov_b32 s9, s11 |
| ; MOVREL-NEXT: s_mov_b32 s10, s12 |
| ; MOVREL-NEXT: s_mov_b32 s11, s13 |
| ; MOVREL-NEXT: s_mov_b32 s12, s14 |
| ; MOVREL-NEXT: s_mov_b32 s13, s15 |
| ; MOVREL-NEXT: s_mov_b32 s14, s16 |
| ; MOVREL-NEXT: s_mov_b32 s15, s17 |
| ; MOVREL-NEXT: s_mov_b32 s16, s18 |
| ; MOVREL-NEXT: s_mov_b32 s17, s19 |
| ; MOVREL-NEXT: s_mov_b32 s18, s20 |
| ; MOVREL-NEXT: s_mov_b32 s19, s21 |
| ; MOVREL-NEXT: s_mov_b32 s20, s22 |
| ; MOVREL-NEXT: s_mov_b32 s21, s23 |
| ; MOVREL-NEXT: s_mov_b32 s22, s24 |
| ; MOVREL-NEXT: s_mov_b32 s23, s25 |
| ; MOVREL-NEXT: s_mov_b32 s24, s26 |
| ; MOVREL-NEXT: s_mov_b32 s25, s27 |
| ; MOVREL-NEXT: s_mov_b32 s26, s28 |
| ; MOVREL-NEXT: s_mov_b32 s27, s29 |
| ; MOVREL-NEXT: s_mov_b32 s28, s30 |
| ; MOVREL-NEXT: s_mov_b32 s29, s31 |
| ; MOVREL-NEXT: s_mov_b32 s31, s33 |
| ; MOVREL-NEXT: s_mov_b32 s30, s32 |
| ; MOVREL-NEXT: s_movreld_b32 s0, s34 |
| ; MOVREL-NEXT: v_mov_b32_e32 v0, s0 |
| ; MOVREL-NEXT: v_mov_b32_e32 v1, s1 |
| ; MOVREL-NEXT: v_mov_b32_e32 v2, s2 |
| ; MOVREL-NEXT: v_mov_b32_e32 v3, s3 |
| ; MOVREL-NEXT: v_mov_b32_e32 v4, s4 |
| ; MOVREL-NEXT: v_mov_b32_e32 v5, s5 |
| ; MOVREL-NEXT: v_mov_b32_e32 v6, s6 |
| ; MOVREL-NEXT: v_mov_b32_e32 v7, s7 |
| ; MOVREL-NEXT: v_mov_b32_e32 v8, s8 |
| ; MOVREL-NEXT: v_mov_b32_e32 v9, s9 |
| ; MOVREL-NEXT: v_mov_b32_e32 v10, s10 |
| ; MOVREL-NEXT: v_mov_b32_e32 v11, s11 |
| ; MOVREL-NEXT: v_mov_b32_e32 v12, s12 |
| ; MOVREL-NEXT: v_mov_b32_e32 v13, s13 |
| ; MOVREL-NEXT: v_mov_b32_e32 v14, s14 |
| ; MOVREL-NEXT: v_mov_b32_e32 v15, s15 |
| ; MOVREL-NEXT: v_mov_b32_e32 v16, s16 |
| ; MOVREL-NEXT: v_mov_b32_e32 v17, s17 |
| ; MOVREL-NEXT: v_mov_b32_e32 v18, s18 |
| ; MOVREL-NEXT: v_mov_b32_e32 v19, s19 |
| ; MOVREL-NEXT: v_mov_b32_e32 v20, s20 |
| ; MOVREL-NEXT: v_mov_b32_e32 v21, s21 |
| ; MOVREL-NEXT: v_mov_b32_e32 v22, s22 |
| ; MOVREL-NEXT: v_mov_b32_e32 v23, s23 |
| ; MOVREL-NEXT: v_mov_b32_e32 v24, s24 |
| ; MOVREL-NEXT: v_mov_b32_e32 v25, s25 |
| ; MOVREL-NEXT: v_mov_b32_e32 v26, s26 |
| ; MOVREL-NEXT: v_mov_b32_e32 v27, s27 |
| ; MOVREL-NEXT: v_mov_b32_e32 v28, s28 |
| ; MOVREL-NEXT: v_mov_b32_e32 v29, s29 |
| ; MOVREL-NEXT: v_mov_b32_e32 v30, s30 |
| ; MOVREL-NEXT: v_mov_b32_e32 v31, s31 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <32 x float> %vec, float %val, i32 %idx |
| ret <32 x float> %insert |
| } |
| |
| ; Uniform insert of a 64-bit element: both targets use the paired form |
| ; s_movreld_b64 s[0:1], s[34:35] with the element index in m0 (from s36). |
| define amdgpu_ps <16 x i64> @dyn_insertelement_v16i64_s_s_s(<16 x i64> inreg %vec, i64 inreg %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v16i64_s_s_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_mov_b32 s0, s2 |
| ; GPRIDX-NEXT: s_mov_b32 s1, s3 |
| ; GPRIDX-NEXT: s_mov_b32 s2, s4 |
| ; GPRIDX-NEXT: s_mov_b32 s3, s5 |
| ; GPRIDX-NEXT: s_mov_b32 s4, s6 |
| ; GPRIDX-NEXT: s_mov_b32 s5, s7 |
| ; GPRIDX-NEXT: s_mov_b32 s6, s8 |
| ; GPRIDX-NEXT: s_mov_b32 s7, s9 |
| ; GPRIDX-NEXT: s_mov_b32 s8, s10 |
| ; GPRIDX-NEXT: s_mov_b32 s9, s11 |
| ; GPRIDX-NEXT: s_mov_b32 s10, s12 |
| ; GPRIDX-NEXT: s_mov_b32 s11, s13 |
| ; GPRIDX-NEXT: s_mov_b32 s12, s14 |
| ; GPRIDX-NEXT: s_mov_b32 s13, s15 |
| ; GPRIDX-NEXT: s_mov_b32 s14, s16 |
| ; GPRIDX-NEXT: s_mov_b32 s15, s17 |
| ; GPRIDX-NEXT: s_mov_b32 s16, s18 |
| ; GPRIDX-NEXT: s_mov_b32 s17, s19 |
| ; GPRIDX-NEXT: s_mov_b32 s18, s20 |
| ; GPRIDX-NEXT: s_mov_b32 s19, s21 |
| ; GPRIDX-NEXT: s_mov_b32 s20, s22 |
| ; GPRIDX-NEXT: s_mov_b32 s21, s23 |
| ; GPRIDX-NEXT: s_mov_b32 s22, s24 |
| ; GPRIDX-NEXT: s_mov_b32 s23, s25 |
| ; GPRIDX-NEXT: s_mov_b32 s24, s26 |
| ; GPRIDX-NEXT: s_mov_b32 s25, s27 |
| ; GPRIDX-NEXT: s_mov_b32 s26, s28 |
| ; GPRIDX-NEXT: s_mov_b32 s27, s29 |
| ; GPRIDX-NEXT: s_mov_b32 s28, s30 |
| ; GPRIDX-NEXT: s_mov_b32 s29, s31 |
| ; GPRIDX-NEXT: s_mov_b32 s31, s33 |
| ; GPRIDX-NEXT: s_mov_b32 s30, s32 |
| ; GPRIDX-NEXT: s_mov_b32 m0, s36 |
| ; GPRIDX-NEXT: s_nop 0 |
| ; GPRIDX-NEXT: s_movreld_b64 s[0:1], s[34:35] |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v16i64_s_s_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_mov_b32 s0, s2 |
| ; MOVREL-NEXT: s_mov_b32 s1, s3 |
| ; MOVREL-NEXT: s_mov_b32 m0, s36 |
| ; MOVREL-NEXT: s_mov_b32 s2, s4 |
| ; MOVREL-NEXT: s_mov_b32 s3, s5 |
| ; MOVREL-NEXT: s_mov_b32 s4, s6 |
| ; MOVREL-NEXT: s_mov_b32 s5, s7 |
| ; MOVREL-NEXT: s_mov_b32 s6, s8 |
| ; MOVREL-NEXT: s_mov_b32 s7, s9 |
| ; MOVREL-NEXT: s_mov_b32 s8, s10 |
| ; MOVREL-NEXT: s_mov_b32 s9, s11 |
| ; MOVREL-NEXT: s_mov_b32 s10, s12 |
| ; MOVREL-NEXT: s_mov_b32 s11, s13 |
| ; MOVREL-NEXT: s_mov_b32 s12, s14 |
| ; MOVREL-NEXT: s_mov_b32 s13, s15 |
| ; MOVREL-NEXT: s_mov_b32 s14, s16 |
| ; MOVREL-NEXT: s_mov_b32 s15, s17 |
| ; MOVREL-NEXT: s_mov_b32 s16, s18 |
| ; MOVREL-NEXT: s_mov_b32 s17, s19 |
| ; MOVREL-NEXT: s_mov_b32 s18, s20 |
| ; MOVREL-NEXT: s_mov_b32 s19, s21 |
| ; MOVREL-NEXT: s_mov_b32 s20, s22 |
| ; MOVREL-NEXT: s_mov_b32 s21, s23 |
| ; MOVREL-NEXT: s_mov_b32 s22, s24 |
| ; MOVREL-NEXT: s_mov_b32 s23, s25 |
| ; MOVREL-NEXT: s_mov_b32 s24, s26 |
| ; MOVREL-NEXT: s_mov_b32 s25, s27 |
| ; MOVREL-NEXT: s_mov_b32 s26, s28 |
| ; MOVREL-NEXT: s_mov_b32 s27, s29 |
| ; MOVREL-NEXT: s_mov_b32 s28, s30 |
| ; MOVREL-NEXT: s_mov_b32 s29, s31 |
| ; MOVREL-NEXT: s_mov_b32 s31, s33 |
| ; MOVREL-NEXT: s_mov_b32 s30, s32 |
| ; MOVREL-NEXT: s_movreld_b64 s[0:1], s[34:35] |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <16 x i64> %vec, i64 %val, i32 %idx |
| ret <16 x i64> %insert |
| } |
| |
| ; Double-precision variant of the uniform 64-bit insert; identical |
| ; s_movreld_b64 lowering as the i64 case on both targets. |
| define amdgpu_ps <16 x double> @dyn_insertelement_v16f64_s_s_s(<16 x double> inreg %vec, double inreg %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v16f64_s_s_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_mov_b32 s0, s2 |
| ; GPRIDX-NEXT: s_mov_b32 s1, s3 |
| ; GPRIDX-NEXT: s_mov_b32 s2, s4 |
| ; GPRIDX-NEXT: s_mov_b32 s3, s5 |
| ; GPRIDX-NEXT: s_mov_b32 s4, s6 |
| ; GPRIDX-NEXT: s_mov_b32 s5, s7 |
| ; GPRIDX-NEXT: s_mov_b32 s6, s8 |
| ; GPRIDX-NEXT: s_mov_b32 s7, s9 |
| ; GPRIDX-NEXT: s_mov_b32 s8, s10 |
| ; GPRIDX-NEXT: s_mov_b32 s9, s11 |
| ; GPRIDX-NEXT: s_mov_b32 s10, s12 |
| ; GPRIDX-NEXT: s_mov_b32 s11, s13 |
| ; GPRIDX-NEXT: s_mov_b32 s12, s14 |
| ; GPRIDX-NEXT: s_mov_b32 s13, s15 |
| ; GPRIDX-NEXT: s_mov_b32 s14, s16 |
| ; GPRIDX-NEXT: s_mov_b32 s15, s17 |
| ; GPRIDX-NEXT: s_mov_b32 s16, s18 |
| ; GPRIDX-NEXT: s_mov_b32 s17, s19 |
| ; GPRIDX-NEXT: s_mov_b32 s18, s20 |
| ; GPRIDX-NEXT: s_mov_b32 s19, s21 |
| ; GPRIDX-NEXT: s_mov_b32 s20, s22 |
| ; GPRIDX-NEXT: s_mov_b32 s21, s23 |
| ; GPRIDX-NEXT: s_mov_b32 s22, s24 |
| ; GPRIDX-NEXT: s_mov_b32 s23, s25 |
| ; GPRIDX-NEXT: s_mov_b32 s24, s26 |
| ; GPRIDX-NEXT: s_mov_b32 s25, s27 |
| ; GPRIDX-NEXT: s_mov_b32 s26, s28 |
| ; GPRIDX-NEXT: s_mov_b32 s27, s29 |
| ; GPRIDX-NEXT: s_mov_b32 s28, s30 |
| ; GPRIDX-NEXT: s_mov_b32 s29, s31 |
| ; GPRIDX-NEXT: s_mov_b32 s31, s33 |
| ; GPRIDX-NEXT: s_mov_b32 s30, s32 |
| ; GPRIDX-NEXT: s_mov_b32 m0, s36 |
| ; GPRIDX-NEXT: s_nop 0 |
| ; GPRIDX-NEXT: s_movreld_b64 s[0:1], s[34:35] |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v16f64_s_s_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_mov_b32 s0, s2 |
| ; MOVREL-NEXT: s_mov_b32 s1, s3 |
| ; MOVREL-NEXT: s_mov_b32 m0, s36 |
| ; MOVREL-NEXT: s_mov_b32 s2, s4 |
| ; MOVREL-NEXT: s_mov_b32 s3, s5 |
| ; MOVREL-NEXT: s_mov_b32 s4, s6 |
| ; MOVREL-NEXT: s_mov_b32 s5, s7 |
| ; MOVREL-NEXT: s_mov_b32 s6, s8 |
| ; MOVREL-NEXT: s_mov_b32 s7, s9 |
| ; MOVREL-NEXT: s_mov_b32 s8, s10 |
| ; MOVREL-NEXT: s_mov_b32 s9, s11 |
| ; MOVREL-NEXT: s_mov_b32 s10, s12 |
| ; MOVREL-NEXT: s_mov_b32 s11, s13 |
| ; MOVREL-NEXT: s_mov_b32 s12, s14 |
| ; MOVREL-NEXT: s_mov_b32 s13, s15 |
| ; MOVREL-NEXT: s_mov_b32 s14, s16 |
| ; MOVREL-NEXT: s_mov_b32 s15, s17 |
| ; MOVREL-NEXT: s_mov_b32 s16, s18 |
| ; MOVREL-NEXT: s_mov_b32 s17, s19 |
| ; MOVREL-NEXT: s_mov_b32 s18, s20 |
| ; MOVREL-NEXT: s_mov_b32 s19, s21 |
| ; MOVREL-NEXT: s_mov_b32 s20, s22 |
| ; MOVREL-NEXT: s_mov_b32 s21, s23 |
| ; MOVREL-NEXT: s_mov_b32 s22, s24 |
| ; MOVREL-NEXT: s_mov_b32 s23, s25 |
| ; MOVREL-NEXT: s_mov_b32 s24, s26 |
| ; MOVREL-NEXT: s_mov_b32 s25, s27 |
| ; MOVREL-NEXT: s_mov_b32 s26, s28 |
| ; MOVREL-NEXT: s_mov_b32 s27, s29 |
| ; MOVREL-NEXT: s_mov_b32 s28, s30 |
| ; MOVREL-NEXT: s_mov_b32 s29, s31 |
| ; MOVREL-NEXT: s_mov_b32 s31, s33 |
| ; MOVREL-NEXT: s_mov_b32 s30, s32 |
| ; MOVREL-NEXT: s_movreld_b64 s[0:1], s[34:35] |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <16 x double> %vec, double %val, i32 %idx |
| ret <16 x double> %insert |
| } |
| |
| ; VGPR value with uniform index: the SGPR vector is copied into v1..v16, the |
| ; insert is done on VGPRs (gfx900 via s_set_gpr_idx_on/off destination |
| ; indexing, gfx1010 via v_movreld_b32 with m0), then v_readfirstlane moves the |
| ; result back to s0..s15 for the SGPR return value. |
| define amdgpu_ps <16 x i32> @dyn_insertelement_v16i32_s_v_s(<16 x i32> inreg %vec, i32 %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v16i32_s_v_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_mov_b32 s1, s3 |
| ; GPRIDX-NEXT: s_mov_b32 s3, s5 |
| ; GPRIDX-NEXT: s_mov_b32 s5, s7 |
| ; GPRIDX-NEXT: s_mov_b32 s7, s9 |
| ; GPRIDX-NEXT: s_mov_b32 s9, s11 |
| ; GPRIDX-NEXT: s_mov_b32 s11, s13 |
| ; GPRIDX-NEXT: s_mov_b32 s13, s15 |
| ; GPRIDX-NEXT: s_mov_b32 s15, s17 |
| ; GPRIDX-NEXT: s_mov_b32 s0, s2 |
| ; GPRIDX-NEXT: s_mov_b32 s2, s4 |
| ; GPRIDX-NEXT: s_mov_b32 s4, s6 |
| ; GPRIDX-NEXT: s_mov_b32 s6, s8 |
| ; GPRIDX-NEXT: s_mov_b32 s8, s10 |
| ; GPRIDX-NEXT: s_mov_b32 s10, s12 |
| ; GPRIDX-NEXT: s_mov_b32 s12, s14 |
| ; GPRIDX-NEXT: s_mov_b32 s14, s16 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v16, s15 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v15, s14 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v14, s13 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v13, s12 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v12, s11 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v11, s10 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v10, s9 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v9, s8 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v8, s7 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v7, s6 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v6, s5 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v5, s4 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v4, s3 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v3, s2 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v2, s1 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v1, s0 |
| ; GPRIDX-NEXT: s_set_gpr_idx_on s18, gpr_idx(DST) |
| ; GPRIDX-NEXT: v_mov_b32_e32 v1, v0 |
| ; GPRIDX-NEXT: s_set_gpr_idx_off |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s0, v1 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s1, v2 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s2, v3 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s3, v4 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s4, v5 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s5, v6 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s6, v7 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s7, v8 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s8, v9 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s9, v10 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s10, v11 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s11, v12 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s12, v13 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s13, v14 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s14, v15 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s15, v16 |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v16i32_s_v_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_mov_b32 s1, s3 |
| ; MOVREL-NEXT: s_mov_b32 s3, s5 |
| ; MOVREL-NEXT: s_mov_b32 s5, s7 |
| ; MOVREL-NEXT: s_mov_b32 s7, s9 |
| ; MOVREL-NEXT: s_mov_b32 s9, s11 |
| ; MOVREL-NEXT: s_mov_b32 s11, s13 |
| ; MOVREL-NEXT: s_mov_b32 s13, s15 |
| ; MOVREL-NEXT: s_mov_b32 s15, s17 |
| ; MOVREL-NEXT: s_mov_b32 s0, s2 |
| ; MOVREL-NEXT: s_mov_b32 s2, s4 |
| ; MOVREL-NEXT: s_mov_b32 s4, s6 |
| ; MOVREL-NEXT: s_mov_b32 s6, s8 |
| ; MOVREL-NEXT: s_mov_b32 s8, s10 |
| ; MOVREL-NEXT: s_mov_b32 s10, s12 |
| ; MOVREL-NEXT: s_mov_b32 s12, s14 |
| ; MOVREL-NEXT: s_mov_b32 s14, s16 |
| ; MOVREL-NEXT: v_mov_b32_e32 v16, s15 |
| ; MOVREL-NEXT: v_mov_b32_e32 v1, s0 |
| ; MOVREL-NEXT: s_mov_b32 m0, s18 |
| ; MOVREL-NEXT: v_mov_b32_e32 v15, s14 |
| ; MOVREL-NEXT: v_mov_b32_e32 v14, s13 |
| ; MOVREL-NEXT: v_mov_b32_e32 v13, s12 |
| ; MOVREL-NEXT: v_mov_b32_e32 v12, s11 |
| ; MOVREL-NEXT: v_mov_b32_e32 v11, s10 |
| ; MOVREL-NEXT: v_mov_b32_e32 v10, s9 |
| ; MOVREL-NEXT: v_mov_b32_e32 v9, s8 |
| ; MOVREL-NEXT: v_mov_b32_e32 v8, s7 |
| ; MOVREL-NEXT: v_mov_b32_e32 v7, s6 |
| ; MOVREL-NEXT: v_mov_b32_e32 v6, s5 |
| ; MOVREL-NEXT: v_mov_b32_e32 v5, s4 |
| ; MOVREL-NEXT: v_mov_b32_e32 v4, s3 |
| ; MOVREL-NEXT: v_mov_b32_e32 v3, s2 |
| ; MOVREL-NEXT: v_mov_b32_e32 v2, s1 |
| ; MOVREL-NEXT: v_movreld_b32_e32 v1, v0 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s0, v1 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s1, v2 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s2, v3 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s3, v4 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s4, v5 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s5, v6 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s6, v7 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s7, v8 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s8, v9 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s9, v10 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s10, v11 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s11, v12 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s12, v13 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s13, v14 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s14, v15 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s15, v16 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <16 x i32> %vec, i32 %val, i32 %idx |
| ret <16 x i32> %insert |
| } |
| |
| ; Float variant of the VGPR-value / uniform-index insert. Unlike the i32 |
| ; version, the <16 x float> result is returned in v0..v15, so no |
| ; v_readfirstlane copies are needed after the indexed VGPR write |
| ; (s_set_gpr_idx_on/off on gfx900, v_movreld_b32 with m0 on gfx1010). |
| define amdgpu_ps <16 x float> @dyn_insertelement_v16f32_s_v_s(<16 x float> inreg %vec, float %val, i32 inreg %idx) { |
| ; GPRIDX-LABEL: dyn_insertelement_v16f32_s_v_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_mov_b32 s0, s2 |
| ; GPRIDX-NEXT: s_mov_b32 s1, s3 |
| ; GPRIDX-NEXT: s_mov_b32 s2, s4 |
| ; GPRIDX-NEXT: s_mov_b32 s3, s5 |
| ; GPRIDX-NEXT: s_mov_b32 s4, s6 |
| ; GPRIDX-NEXT: s_mov_b32 s5, s7 |
| ; GPRIDX-NEXT: s_mov_b32 s6, s8 |
| ; GPRIDX-NEXT: s_mov_b32 s7, s9 |
| ; GPRIDX-NEXT: s_mov_b32 s8, s10 |
| ; GPRIDX-NEXT: s_mov_b32 s9, s11 |
| ; GPRIDX-NEXT: s_mov_b32 s10, s12 |
| ; GPRIDX-NEXT: s_mov_b32 s11, s13 |
| ; GPRIDX-NEXT: s_mov_b32 s12, s14 |
| ; GPRIDX-NEXT: s_mov_b32 s13, s15 |
| ; GPRIDX-NEXT: s_mov_b32 s14, s16 |
| ; GPRIDX-NEXT: s_mov_b32 s15, s17 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v16, v0 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v0, s0 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v1, s1 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v2, s2 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v3, s3 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v4, s4 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v5, s5 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v6, s6 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v7, s7 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v8, s8 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v9, s9 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v10, s10 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v11, s11 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v12, s12 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v13, s13 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v14, s14 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v15, s15 |
| ; GPRIDX-NEXT: s_set_gpr_idx_on s18, gpr_idx(DST) |
| ; GPRIDX-NEXT: v_mov_b32_e32 v0, v16 |
| ; GPRIDX-NEXT: s_set_gpr_idx_off |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v16f32_s_v_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_mov_b32 s0, s2 |
| ; MOVREL-NEXT: s_mov_b32 s1, s3 |
| ; MOVREL-NEXT: s_mov_b32 s2, s4 |
| ; MOVREL-NEXT: s_mov_b32 s3, s5 |
| ; MOVREL-NEXT: s_mov_b32 s4, s6 |
| ; MOVREL-NEXT: s_mov_b32 s5, s7 |
| ; MOVREL-NEXT: s_mov_b32 s6, s8 |
| ; MOVREL-NEXT: s_mov_b32 s7, s9 |
| ; MOVREL-NEXT: s_mov_b32 s8, s10 |
| ; MOVREL-NEXT: s_mov_b32 s9, s11 |
| ; MOVREL-NEXT: s_mov_b32 s10, s12 |
| ; MOVREL-NEXT: s_mov_b32 s11, s13 |
| ; MOVREL-NEXT: s_mov_b32 s12, s14 |
| ; MOVREL-NEXT: s_mov_b32 s13, s15 |
| ; MOVREL-NEXT: s_mov_b32 s14, s16 |
| ; MOVREL-NEXT: s_mov_b32 s15, s17 |
| ; MOVREL-NEXT: v_mov_b32_e32 v16, v0 |
| ; MOVREL-NEXT: v_mov_b32_e32 v0, s0 |
| ; MOVREL-NEXT: s_mov_b32 m0, s18 |
| ; MOVREL-NEXT: v_mov_b32_e32 v1, s1 |
| ; MOVREL-NEXT: v_mov_b32_e32 v2, s2 |
| ; MOVREL-NEXT: v_mov_b32_e32 v3, s3 |
| ; MOVREL-NEXT: v_mov_b32_e32 v4, s4 |
| ; MOVREL-NEXT: v_mov_b32_e32 v5, s5 |
| ; MOVREL-NEXT: v_mov_b32_e32 v6, s6 |
| ; MOVREL-NEXT: v_mov_b32_e32 v7, s7 |
| ; MOVREL-NEXT: v_mov_b32_e32 v8, s8 |
| ; MOVREL-NEXT: v_mov_b32_e32 v9, s9 |
| ; MOVREL-NEXT: v_mov_b32_e32 v10, s10 |
| ; MOVREL-NEXT: v_mov_b32_e32 v11, s11 |
| ; MOVREL-NEXT: v_mov_b32_e32 v12, s12 |
| ; MOVREL-NEXT: v_mov_b32_e32 v13, s13 |
| ; MOVREL-NEXT: v_mov_b32_e32 v14, s14 |
| ; MOVREL-NEXT: v_mov_b32_e32 v15, s15 |
| ; MOVREL-NEXT: v_movreld_b32_e32 v0, v16 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <16 x float> %vec, float %val, i32 %idx |
| ret <16 x float> %insert |
| } |
| |
; Dynamic insert of a VGPR float (%val in v0) into a <32 x float> vector held
; in SGPRs, at a dynamic SGPR index (s34 after the inreg-argument shuffle).
; GPRIDX (gfx900): the vector is copied into v0-v31 and the element written
; through the s_set_gpr_idx_on/s_set_gpr_idx_off indexed-destination mode.
; MOVREL (gfx1010): the index is placed in m0 and the write uses
; v_movreld_b32_e32 instead.
define amdgpu_ps <32 x float> @dyn_insertelement_v32f32_s_v_s(<32 x float> inreg %vec, float %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v32f32_s_v_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_mov_b32 s0, s2
; GPRIDX-NEXT: s_mov_b32 s1, s3
; GPRIDX-NEXT: s_mov_b32 s2, s4
; GPRIDX-NEXT: s_mov_b32 s3, s5
; GPRIDX-NEXT: s_mov_b32 s4, s6
; GPRIDX-NEXT: s_mov_b32 s5, s7
; GPRIDX-NEXT: s_mov_b32 s6, s8
; GPRIDX-NEXT: s_mov_b32 s7, s9
; GPRIDX-NEXT: s_mov_b32 s8, s10
; GPRIDX-NEXT: s_mov_b32 s9, s11
; GPRIDX-NEXT: s_mov_b32 s10, s12
; GPRIDX-NEXT: s_mov_b32 s11, s13
; GPRIDX-NEXT: s_mov_b32 s12, s14
; GPRIDX-NEXT: s_mov_b32 s13, s15
; GPRIDX-NEXT: s_mov_b32 s14, s16
; GPRIDX-NEXT: s_mov_b32 s15, s17
; GPRIDX-NEXT: s_mov_b32 s16, s18
; GPRIDX-NEXT: s_mov_b32 s17, s19
; GPRIDX-NEXT: s_mov_b32 s18, s20
; GPRIDX-NEXT: s_mov_b32 s19, s21
; GPRIDX-NEXT: s_mov_b32 s20, s22
; GPRIDX-NEXT: s_mov_b32 s21, s23
; GPRIDX-NEXT: s_mov_b32 s22, s24
; GPRIDX-NEXT: s_mov_b32 s23, s25
; GPRIDX-NEXT: s_mov_b32 s24, s26
; GPRIDX-NEXT: s_mov_b32 s25, s27
; GPRIDX-NEXT: s_mov_b32 s26, s28
; GPRIDX-NEXT: s_mov_b32 s27, s29
; GPRIDX-NEXT: s_mov_b32 s28, s30
; GPRIDX-NEXT: s_mov_b32 s29, s31
; GPRIDX-NEXT: s_mov_b32 s31, s33
; GPRIDX-NEXT: s_mov_b32 s30, s32
; GPRIDX-NEXT: v_mov_b32_e32 v32, v0
; GPRIDX-NEXT: v_mov_b32_e32 v0, s0
; GPRIDX-NEXT: v_mov_b32_e32 v1, s1
; GPRIDX-NEXT: v_mov_b32_e32 v2, s2
; GPRIDX-NEXT: v_mov_b32_e32 v3, s3
; GPRIDX-NEXT: v_mov_b32_e32 v4, s4
; GPRIDX-NEXT: v_mov_b32_e32 v5, s5
; GPRIDX-NEXT: v_mov_b32_e32 v6, s6
; GPRIDX-NEXT: v_mov_b32_e32 v7, s7
; GPRIDX-NEXT: v_mov_b32_e32 v8, s8
; GPRIDX-NEXT: v_mov_b32_e32 v9, s9
; GPRIDX-NEXT: v_mov_b32_e32 v10, s10
; GPRIDX-NEXT: v_mov_b32_e32 v11, s11
; GPRIDX-NEXT: v_mov_b32_e32 v12, s12
; GPRIDX-NEXT: v_mov_b32_e32 v13, s13
; GPRIDX-NEXT: v_mov_b32_e32 v14, s14
; GPRIDX-NEXT: v_mov_b32_e32 v15, s15
; GPRIDX-NEXT: v_mov_b32_e32 v16, s16
; GPRIDX-NEXT: v_mov_b32_e32 v17, s17
; GPRIDX-NEXT: v_mov_b32_e32 v18, s18
; GPRIDX-NEXT: v_mov_b32_e32 v19, s19
; GPRIDX-NEXT: v_mov_b32_e32 v20, s20
; GPRIDX-NEXT: v_mov_b32_e32 v21, s21
; GPRIDX-NEXT: v_mov_b32_e32 v22, s22
; GPRIDX-NEXT: v_mov_b32_e32 v23, s23
; GPRIDX-NEXT: v_mov_b32_e32 v24, s24
; GPRIDX-NEXT: v_mov_b32_e32 v25, s25
; GPRIDX-NEXT: v_mov_b32_e32 v26, s26
; GPRIDX-NEXT: v_mov_b32_e32 v27, s27
; GPRIDX-NEXT: v_mov_b32_e32 v28, s28
; GPRIDX-NEXT: v_mov_b32_e32 v29, s29
; GPRIDX-NEXT: v_mov_b32_e32 v30, s30
; GPRIDX-NEXT: v_mov_b32_e32 v31, s31
; GPRIDX-NEXT: s_set_gpr_idx_on s34, gpr_idx(DST)
; GPRIDX-NEXT: v_mov_b32_e32 v0, v32
; GPRIDX-NEXT: s_set_gpr_idx_off
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v32f32_s_v_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_mov_b32 s0, s2
; MOVREL-NEXT: s_mov_b32 s1, s3
; MOVREL-NEXT: s_mov_b32 s2, s4
; MOVREL-NEXT: s_mov_b32 s3, s5
; MOVREL-NEXT: s_mov_b32 s4, s6
; MOVREL-NEXT: s_mov_b32 s5, s7
; MOVREL-NEXT: s_mov_b32 s6, s8
; MOVREL-NEXT: s_mov_b32 s7, s9
; MOVREL-NEXT: s_mov_b32 s8, s10
; MOVREL-NEXT: s_mov_b32 s9, s11
; MOVREL-NEXT: s_mov_b32 s10, s12
; MOVREL-NEXT: s_mov_b32 s11, s13
; MOVREL-NEXT: s_mov_b32 s12, s14
; MOVREL-NEXT: s_mov_b32 s13, s15
; MOVREL-NEXT: s_mov_b32 s14, s16
; MOVREL-NEXT: s_mov_b32 s15, s17
; MOVREL-NEXT: s_mov_b32 s16, s18
; MOVREL-NEXT: s_mov_b32 s17, s19
; MOVREL-NEXT: s_mov_b32 s18, s20
; MOVREL-NEXT: s_mov_b32 s19, s21
; MOVREL-NEXT: s_mov_b32 s20, s22
; MOVREL-NEXT: s_mov_b32 s21, s23
; MOVREL-NEXT: s_mov_b32 s22, s24
; MOVREL-NEXT: s_mov_b32 s23, s25
; MOVREL-NEXT: s_mov_b32 s24, s26
; MOVREL-NEXT: s_mov_b32 s25, s27
; MOVREL-NEXT: s_mov_b32 s26, s28
; MOVREL-NEXT: s_mov_b32 s27, s29
; MOVREL-NEXT: s_mov_b32 s28, s30
; MOVREL-NEXT: s_mov_b32 s29, s31
; MOVREL-NEXT: s_mov_b32 s31, s33
; MOVREL-NEXT: s_mov_b32 s30, s32
; MOVREL-NEXT: v_mov_b32_e32 v32, v0
; MOVREL-NEXT: v_mov_b32_e32 v0, s0
; MOVREL-NEXT: s_mov_b32 m0, s34
; MOVREL-NEXT: v_mov_b32_e32 v1, s1
; MOVREL-NEXT: v_mov_b32_e32 v2, s2
; MOVREL-NEXT: v_mov_b32_e32 v3, s3
; MOVREL-NEXT: v_mov_b32_e32 v4, s4
; MOVREL-NEXT: v_mov_b32_e32 v5, s5
; MOVREL-NEXT: v_mov_b32_e32 v6, s6
; MOVREL-NEXT: v_mov_b32_e32 v7, s7
; MOVREL-NEXT: v_mov_b32_e32 v8, s8
; MOVREL-NEXT: v_mov_b32_e32 v9, s9
; MOVREL-NEXT: v_mov_b32_e32 v10, s10
; MOVREL-NEXT: v_mov_b32_e32 v11, s11
; MOVREL-NEXT: v_mov_b32_e32 v12, s12
; MOVREL-NEXT: v_mov_b32_e32 v13, s13
; MOVREL-NEXT: v_mov_b32_e32 v14, s14
; MOVREL-NEXT: v_mov_b32_e32 v15, s15
; MOVREL-NEXT: v_mov_b32_e32 v16, s16
; MOVREL-NEXT: v_mov_b32_e32 v17, s17
; MOVREL-NEXT: v_mov_b32_e32 v18, s18
; MOVREL-NEXT: v_mov_b32_e32 v19, s19
; MOVREL-NEXT: v_mov_b32_e32 v20, s20
; MOVREL-NEXT: v_mov_b32_e32 v21, s21
; MOVREL-NEXT: v_mov_b32_e32 v22, s22
; MOVREL-NEXT: v_mov_b32_e32 v23, s23
; MOVREL-NEXT: v_mov_b32_e32 v24, s24
; MOVREL-NEXT: v_mov_b32_e32 v25, s25
; MOVREL-NEXT: v_mov_b32_e32 v26, s26
; MOVREL-NEXT: v_mov_b32_e32 v27, s27
; MOVREL-NEXT: v_mov_b32_e32 v28, s28
; MOVREL-NEXT: v_mov_b32_e32 v29, s29
; MOVREL-NEXT: v_mov_b32_e32 v30, s30
; MOVREL-NEXT: v_mov_b32_e32 v31, s31
; MOVREL-NEXT: v_movreld_b32_e32 v0, v32
; MOVREL-NEXT: ; return to shader part epilog
entry:
  %insert = insertelement <32 x float> %vec, float %val, i32 %idx
  ret <32 x float> %insert
}
| |
; Dynamic insert of a 64-bit VGPR value (v0:v1) into a <16 x i64> vector held
; in SGPRs, at a dynamic SGPR index. The element index is scaled by 2
; (s_lshl_b32 ..., 1) because indexing operates on 32-bit subregisters; the
; low and high halves are written with two indexed moves. The result is
; copied back to SGPRs via v_readfirstlane_b32 to satisfy the SGPR return.
define amdgpu_ps <16 x i64> @dyn_insertelement_v16i64_s_v_s(<16 x i64> inreg %vec, i64 %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v16i64_s_v_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_mov_b32 s1, s3
; GPRIDX-NEXT: s_mov_b32 s3, s5
; GPRIDX-NEXT: s_mov_b32 s5, s7
; GPRIDX-NEXT: s_mov_b32 s7, s9
; GPRIDX-NEXT: s_mov_b32 s9, s11
; GPRIDX-NEXT: s_mov_b32 s11, s13
; GPRIDX-NEXT: s_mov_b32 s13, s15
; GPRIDX-NEXT: s_mov_b32 s15, s17
; GPRIDX-NEXT: s_mov_b32 s17, s19
; GPRIDX-NEXT: s_mov_b32 s19, s21
; GPRIDX-NEXT: s_mov_b32 s21, s23
; GPRIDX-NEXT: s_mov_b32 s23, s25
; GPRIDX-NEXT: s_mov_b32 s25, s27
; GPRIDX-NEXT: s_mov_b32 s27, s29
; GPRIDX-NEXT: s_mov_b32 s29, s31
; GPRIDX-NEXT: s_mov_b32 s31, s33
; GPRIDX-NEXT: s_mov_b32 s0, s2
; GPRIDX-NEXT: s_mov_b32 s2, s4
; GPRIDX-NEXT: s_mov_b32 s4, s6
; GPRIDX-NEXT: s_mov_b32 s6, s8
; GPRIDX-NEXT: s_mov_b32 s8, s10
; GPRIDX-NEXT: s_mov_b32 s10, s12
; GPRIDX-NEXT: s_mov_b32 s12, s14
; GPRIDX-NEXT: s_mov_b32 s14, s16
; GPRIDX-NEXT: s_mov_b32 s16, s18
; GPRIDX-NEXT: s_mov_b32 s18, s20
; GPRIDX-NEXT: s_mov_b32 s20, s22
; GPRIDX-NEXT: s_mov_b32 s22, s24
; GPRIDX-NEXT: s_mov_b32 s24, s26
; GPRIDX-NEXT: s_mov_b32 s26, s28
; GPRIDX-NEXT: s_mov_b32 s28, s30
; GPRIDX-NEXT: s_mov_b32 s30, s32
; GPRIDX-NEXT: v_mov_b32_e32 v33, s31
; GPRIDX-NEXT: s_lshl_b32 s33, s34, 1
; GPRIDX-NEXT: v_mov_b32_e32 v32, s30
; GPRIDX-NEXT: v_mov_b32_e32 v31, s29
; GPRIDX-NEXT: v_mov_b32_e32 v30, s28
; GPRIDX-NEXT: v_mov_b32_e32 v29, s27
; GPRIDX-NEXT: v_mov_b32_e32 v28, s26
; GPRIDX-NEXT: v_mov_b32_e32 v27, s25
; GPRIDX-NEXT: v_mov_b32_e32 v26, s24
; GPRIDX-NEXT: v_mov_b32_e32 v25, s23
; GPRIDX-NEXT: v_mov_b32_e32 v24, s22
; GPRIDX-NEXT: v_mov_b32_e32 v23, s21
; GPRIDX-NEXT: v_mov_b32_e32 v22, s20
; GPRIDX-NEXT: v_mov_b32_e32 v21, s19
; GPRIDX-NEXT: v_mov_b32_e32 v20, s18
; GPRIDX-NEXT: v_mov_b32_e32 v19, s17
; GPRIDX-NEXT: v_mov_b32_e32 v18, s16
; GPRIDX-NEXT: v_mov_b32_e32 v17, s15
; GPRIDX-NEXT: v_mov_b32_e32 v16, s14
; GPRIDX-NEXT: v_mov_b32_e32 v15, s13
; GPRIDX-NEXT: v_mov_b32_e32 v14, s12
; GPRIDX-NEXT: v_mov_b32_e32 v13, s11
; GPRIDX-NEXT: v_mov_b32_e32 v12, s10
; GPRIDX-NEXT: v_mov_b32_e32 v11, s9
; GPRIDX-NEXT: v_mov_b32_e32 v10, s8
; GPRIDX-NEXT: v_mov_b32_e32 v9, s7
; GPRIDX-NEXT: v_mov_b32_e32 v8, s6
; GPRIDX-NEXT: v_mov_b32_e32 v7, s5
; GPRIDX-NEXT: v_mov_b32_e32 v6, s4
; GPRIDX-NEXT: v_mov_b32_e32 v5, s3
; GPRIDX-NEXT: v_mov_b32_e32 v4, s2
; GPRIDX-NEXT: v_mov_b32_e32 v3, s1
; GPRIDX-NEXT: v_mov_b32_e32 v2, s0
; GPRIDX-NEXT: s_set_gpr_idx_on s33, gpr_idx(DST)
; GPRIDX-NEXT: v_mov_b32_e32 v2, v0
; GPRIDX-NEXT: v_mov_b32_e32 v3, v1
; GPRIDX-NEXT: s_set_gpr_idx_off
; GPRIDX-NEXT: v_readfirstlane_b32 s0, v2
; GPRIDX-NEXT: v_readfirstlane_b32 s1, v3
; GPRIDX-NEXT: v_readfirstlane_b32 s2, v4
; GPRIDX-NEXT: v_readfirstlane_b32 s3, v5
; GPRIDX-NEXT: v_readfirstlane_b32 s4, v6
; GPRIDX-NEXT: v_readfirstlane_b32 s5, v7
; GPRIDX-NEXT: v_readfirstlane_b32 s6, v8
; GPRIDX-NEXT: v_readfirstlane_b32 s7, v9
; GPRIDX-NEXT: v_readfirstlane_b32 s8, v10
; GPRIDX-NEXT: v_readfirstlane_b32 s9, v11
; GPRIDX-NEXT: v_readfirstlane_b32 s10, v12
; GPRIDX-NEXT: v_readfirstlane_b32 s11, v13
; GPRIDX-NEXT: v_readfirstlane_b32 s12, v14
; GPRIDX-NEXT: v_readfirstlane_b32 s13, v15
; GPRIDX-NEXT: v_readfirstlane_b32 s14, v16
; GPRIDX-NEXT: v_readfirstlane_b32 s15, v17
; GPRIDX-NEXT: v_readfirstlane_b32 s16, v18
; GPRIDX-NEXT: v_readfirstlane_b32 s17, v19
; GPRIDX-NEXT: v_readfirstlane_b32 s18, v20
; GPRIDX-NEXT: v_readfirstlane_b32 s19, v21
; GPRIDX-NEXT: v_readfirstlane_b32 s20, v22
; GPRIDX-NEXT: v_readfirstlane_b32 s21, v23
; GPRIDX-NEXT: v_readfirstlane_b32 s22, v24
; GPRIDX-NEXT: v_readfirstlane_b32 s23, v25
; GPRIDX-NEXT: v_readfirstlane_b32 s24, v26
; GPRIDX-NEXT: v_readfirstlane_b32 s25, v27
; GPRIDX-NEXT: v_readfirstlane_b32 s26, v28
; GPRIDX-NEXT: v_readfirstlane_b32 s27, v29
; GPRIDX-NEXT: v_readfirstlane_b32 s28, v30
; GPRIDX-NEXT: v_readfirstlane_b32 s29, v31
; GPRIDX-NEXT: v_readfirstlane_b32 s30, v32
; GPRIDX-NEXT: v_readfirstlane_b32 s31, v33
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v16i64_s_v_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_mov_b32 s1, s3
; MOVREL-NEXT: s_mov_b32 s3, s5
; MOVREL-NEXT: s_mov_b32 s5, s7
; MOVREL-NEXT: s_mov_b32 s7, s9
; MOVREL-NEXT: s_mov_b32 s9, s11
; MOVREL-NEXT: s_mov_b32 s11, s13
; MOVREL-NEXT: s_mov_b32 s13, s15
; MOVREL-NEXT: s_mov_b32 s15, s17
; MOVREL-NEXT: s_mov_b32 s17, s19
; MOVREL-NEXT: s_mov_b32 s19, s21
; MOVREL-NEXT: s_mov_b32 s21, s23
; MOVREL-NEXT: s_mov_b32 s23, s25
; MOVREL-NEXT: s_mov_b32 s25, s27
; MOVREL-NEXT: s_mov_b32 s27, s29
; MOVREL-NEXT: s_mov_b32 s29, s31
; MOVREL-NEXT: s_mov_b32 s31, s33
; MOVREL-NEXT: s_mov_b32 s0, s2
; MOVREL-NEXT: s_mov_b32 s2, s4
; MOVREL-NEXT: s_mov_b32 s4, s6
; MOVREL-NEXT: s_mov_b32 s6, s8
; MOVREL-NEXT: s_mov_b32 s8, s10
; MOVREL-NEXT: s_mov_b32 s10, s12
; MOVREL-NEXT: s_mov_b32 s12, s14
; MOVREL-NEXT: s_mov_b32 s14, s16
; MOVREL-NEXT: s_mov_b32 s16, s18
; MOVREL-NEXT: s_mov_b32 s18, s20
; MOVREL-NEXT: s_mov_b32 s20, s22
; MOVREL-NEXT: s_mov_b32 s22, s24
; MOVREL-NEXT: s_mov_b32 s24, s26
; MOVREL-NEXT: s_mov_b32 s26, s28
; MOVREL-NEXT: s_mov_b32 s28, s30
; MOVREL-NEXT: s_mov_b32 s30, s32
; MOVREL-NEXT: v_mov_b32_e32 v33, s31
; MOVREL-NEXT: v_mov_b32_e32 v2, s0
; MOVREL-NEXT: s_lshl_b32 m0, s34, 1
; MOVREL-NEXT: v_mov_b32_e32 v32, s30
; MOVREL-NEXT: v_mov_b32_e32 v31, s29
; MOVREL-NEXT: v_mov_b32_e32 v30, s28
; MOVREL-NEXT: v_mov_b32_e32 v29, s27
; MOVREL-NEXT: v_mov_b32_e32 v28, s26
; MOVREL-NEXT: v_mov_b32_e32 v27, s25
; MOVREL-NEXT: v_mov_b32_e32 v26, s24
; MOVREL-NEXT: v_mov_b32_e32 v25, s23
; MOVREL-NEXT: v_mov_b32_e32 v24, s22
; MOVREL-NEXT: v_mov_b32_e32 v23, s21
; MOVREL-NEXT: v_mov_b32_e32 v22, s20
; MOVREL-NEXT: v_mov_b32_e32 v21, s19
; MOVREL-NEXT: v_mov_b32_e32 v20, s18
; MOVREL-NEXT: v_mov_b32_e32 v19, s17
; MOVREL-NEXT: v_mov_b32_e32 v18, s16
; MOVREL-NEXT: v_mov_b32_e32 v17, s15
; MOVREL-NEXT: v_mov_b32_e32 v16, s14
; MOVREL-NEXT: v_mov_b32_e32 v15, s13
; MOVREL-NEXT: v_mov_b32_e32 v14, s12
; MOVREL-NEXT: v_mov_b32_e32 v13, s11
; MOVREL-NEXT: v_mov_b32_e32 v12, s10
; MOVREL-NEXT: v_mov_b32_e32 v11, s9
; MOVREL-NEXT: v_mov_b32_e32 v10, s8
; MOVREL-NEXT: v_mov_b32_e32 v9, s7
; MOVREL-NEXT: v_mov_b32_e32 v8, s6
; MOVREL-NEXT: v_mov_b32_e32 v7, s5
; MOVREL-NEXT: v_mov_b32_e32 v6, s4
; MOVREL-NEXT: v_mov_b32_e32 v5, s3
; MOVREL-NEXT: v_mov_b32_e32 v4, s2
; MOVREL-NEXT: v_mov_b32_e32 v3, s1
; MOVREL-NEXT: v_movreld_b32_e32 v2, v0
; MOVREL-NEXT: v_movreld_b32_e32 v3, v1
; MOVREL-NEXT: v_readfirstlane_b32 s0, v2
; MOVREL-NEXT: v_readfirstlane_b32 s1, v3
; MOVREL-NEXT: v_readfirstlane_b32 s2, v4
; MOVREL-NEXT: v_readfirstlane_b32 s3, v5
; MOVREL-NEXT: v_readfirstlane_b32 s4, v6
; MOVREL-NEXT: v_readfirstlane_b32 s5, v7
; MOVREL-NEXT: v_readfirstlane_b32 s6, v8
; MOVREL-NEXT: v_readfirstlane_b32 s7, v9
; MOVREL-NEXT: v_readfirstlane_b32 s8, v10
; MOVREL-NEXT: v_readfirstlane_b32 s9, v11
; MOVREL-NEXT: v_readfirstlane_b32 s10, v12
; MOVREL-NEXT: v_readfirstlane_b32 s11, v13
; MOVREL-NEXT: v_readfirstlane_b32 s12, v14
; MOVREL-NEXT: v_readfirstlane_b32 s13, v15
; MOVREL-NEXT: v_readfirstlane_b32 s14, v16
; MOVREL-NEXT: v_readfirstlane_b32 s15, v17
; MOVREL-NEXT: v_readfirstlane_b32 s16, v18
; MOVREL-NEXT: v_readfirstlane_b32 s17, v19
; MOVREL-NEXT: v_readfirstlane_b32 s18, v20
; MOVREL-NEXT: v_readfirstlane_b32 s19, v21
; MOVREL-NEXT: v_readfirstlane_b32 s20, v22
; MOVREL-NEXT: v_readfirstlane_b32 s21, v23
; MOVREL-NEXT: v_readfirstlane_b32 s22, v24
; MOVREL-NEXT: v_readfirstlane_b32 s23, v25
; MOVREL-NEXT: v_readfirstlane_b32 s24, v26
; MOVREL-NEXT: v_readfirstlane_b32 s25, v27
; MOVREL-NEXT: v_readfirstlane_b32 s26, v28
; MOVREL-NEXT: v_readfirstlane_b32 s27, v29
; MOVREL-NEXT: v_readfirstlane_b32 s28, v30
; MOVREL-NEXT: v_readfirstlane_b32 s29, v31
; MOVREL-NEXT: v_readfirstlane_b32 s30, v32
; MOVREL-NEXT: v_readfirstlane_b32 s31, v33
; MOVREL-NEXT: ; return to shader part epilog
entry:
  %insert = insertelement <16 x i64> %vec, i64 %val, i32 %idx
  ret <16 x i64> %insert
}
| |
; Same lowering as the <16 x i64> case above, but for <16 x double>: the
; 64-bit VGPR value (v0:v1) is written as two 32-bit halves at an index
; scaled by 2, then the whole vector is readfirstlane'd back to SGPRs.
define amdgpu_ps <16 x double> @dyn_insertelement_v16f64_s_v_s(<16 x double> inreg %vec, double %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v16f64_s_v_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_mov_b32 s1, s3
; GPRIDX-NEXT: s_mov_b32 s3, s5
; GPRIDX-NEXT: s_mov_b32 s5, s7
; GPRIDX-NEXT: s_mov_b32 s7, s9
; GPRIDX-NEXT: s_mov_b32 s9, s11
; GPRIDX-NEXT: s_mov_b32 s11, s13
; GPRIDX-NEXT: s_mov_b32 s13, s15
; GPRIDX-NEXT: s_mov_b32 s15, s17
; GPRIDX-NEXT: s_mov_b32 s17, s19
; GPRIDX-NEXT: s_mov_b32 s19, s21
; GPRIDX-NEXT: s_mov_b32 s21, s23
; GPRIDX-NEXT: s_mov_b32 s23, s25
; GPRIDX-NEXT: s_mov_b32 s25, s27
; GPRIDX-NEXT: s_mov_b32 s27, s29
; GPRIDX-NEXT: s_mov_b32 s29, s31
; GPRIDX-NEXT: s_mov_b32 s31, s33
; GPRIDX-NEXT: s_mov_b32 s0, s2
; GPRIDX-NEXT: s_mov_b32 s2, s4
; GPRIDX-NEXT: s_mov_b32 s4, s6
; GPRIDX-NEXT: s_mov_b32 s6, s8
; GPRIDX-NEXT: s_mov_b32 s8, s10
; GPRIDX-NEXT: s_mov_b32 s10, s12
; GPRIDX-NEXT: s_mov_b32 s12, s14
; GPRIDX-NEXT: s_mov_b32 s14, s16
; GPRIDX-NEXT: s_mov_b32 s16, s18
; GPRIDX-NEXT: s_mov_b32 s18, s20
; GPRIDX-NEXT: s_mov_b32 s20, s22
; GPRIDX-NEXT: s_mov_b32 s22, s24
; GPRIDX-NEXT: s_mov_b32 s24, s26
; GPRIDX-NEXT: s_mov_b32 s26, s28
; GPRIDX-NEXT: s_mov_b32 s28, s30
; GPRIDX-NEXT: s_mov_b32 s30, s32
; GPRIDX-NEXT: v_mov_b32_e32 v33, s31
; GPRIDX-NEXT: s_lshl_b32 s33, s34, 1
; GPRIDX-NEXT: v_mov_b32_e32 v32, s30
; GPRIDX-NEXT: v_mov_b32_e32 v31, s29
; GPRIDX-NEXT: v_mov_b32_e32 v30, s28
; GPRIDX-NEXT: v_mov_b32_e32 v29, s27
; GPRIDX-NEXT: v_mov_b32_e32 v28, s26
; GPRIDX-NEXT: v_mov_b32_e32 v27, s25
; GPRIDX-NEXT: v_mov_b32_e32 v26, s24
; GPRIDX-NEXT: v_mov_b32_e32 v25, s23
; GPRIDX-NEXT: v_mov_b32_e32 v24, s22
; GPRIDX-NEXT: v_mov_b32_e32 v23, s21
; GPRIDX-NEXT: v_mov_b32_e32 v22, s20
; GPRIDX-NEXT: v_mov_b32_e32 v21, s19
; GPRIDX-NEXT: v_mov_b32_e32 v20, s18
; GPRIDX-NEXT: v_mov_b32_e32 v19, s17
; GPRIDX-NEXT: v_mov_b32_e32 v18, s16
; GPRIDX-NEXT: v_mov_b32_e32 v17, s15
; GPRIDX-NEXT: v_mov_b32_e32 v16, s14
; GPRIDX-NEXT: v_mov_b32_e32 v15, s13
; GPRIDX-NEXT: v_mov_b32_e32 v14, s12
; GPRIDX-NEXT: v_mov_b32_e32 v13, s11
; GPRIDX-NEXT: v_mov_b32_e32 v12, s10
; GPRIDX-NEXT: v_mov_b32_e32 v11, s9
; GPRIDX-NEXT: v_mov_b32_e32 v10, s8
; GPRIDX-NEXT: v_mov_b32_e32 v9, s7
; GPRIDX-NEXT: v_mov_b32_e32 v8, s6
; GPRIDX-NEXT: v_mov_b32_e32 v7, s5
; GPRIDX-NEXT: v_mov_b32_e32 v6, s4
; GPRIDX-NEXT: v_mov_b32_e32 v5, s3
; GPRIDX-NEXT: v_mov_b32_e32 v4, s2
; GPRIDX-NEXT: v_mov_b32_e32 v3, s1
; GPRIDX-NEXT: v_mov_b32_e32 v2, s0
; GPRIDX-NEXT: s_set_gpr_idx_on s33, gpr_idx(DST)
; GPRIDX-NEXT: v_mov_b32_e32 v2, v0
; GPRIDX-NEXT: v_mov_b32_e32 v3, v1
; GPRIDX-NEXT: s_set_gpr_idx_off
; GPRIDX-NEXT: v_readfirstlane_b32 s0, v2
; GPRIDX-NEXT: v_readfirstlane_b32 s1, v3
; GPRIDX-NEXT: v_readfirstlane_b32 s2, v4
; GPRIDX-NEXT: v_readfirstlane_b32 s3, v5
; GPRIDX-NEXT: v_readfirstlane_b32 s4, v6
; GPRIDX-NEXT: v_readfirstlane_b32 s5, v7
; GPRIDX-NEXT: v_readfirstlane_b32 s6, v8
; GPRIDX-NEXT: v_readfirstlane_b32 s7, v9
; GPRIDX-NEXT: v_readfirstlane_b32 s8, v10
; GPRIDX-NEXT: v_readfirstlane_b32 s9, v11
; GPRIDX-NEXT: v_readfirstlane_b32 s10, v12
; GPRIDX-NEXT: v_readfirstlane_b32 s11, v13
; GPRIDX-NEXT: v_readfirstlane_b32 s12, v14
; GPRIDX-NEXT: v_readfirstlane_b32 s13, v15
; GPRIDX-NEXT: v_readfirstlane_b32 s14, v16
; GPRIDX-NEXT: v_readfirstlane_b32 s15, v17
; GPRIDX-NEXT: v_readfirstlane_b32 s16, v18
; GPRIDX-NEXT: v_readfirstlane_b32 s17, v19
; GPRIDX-NEXT: v_readfirstlane_b32 s18, v20
; GPRIDX-NEXT: v_readfirstlane_b32 s19, v21
; GPRIDX-NEXT: v_readfirstlane_b32 s20, v22
; GPRIDX-NEXT: v_readfirstlane_b32 s21, v23
; GPRIDX-NEXT: v_readfirstlane_b32 s22, v24
; GPRIDX-NEXT: v_readfirstlane_b32 s23, v25
; GPRIDX-NEXT: v_readfirstlane_b32 s24, v26
; GPRIDX-NEXT: v_readfirstlane_b32 s25, v27
; GPRIDX-NEXT: v_readfirstlane_b32 s26, v28
; GPRIDX-NEXT: v_readfirstlane_b32 s27, v29
; GPRIDX-NEXT: v_readfirstlane_b32 s28, v30
; GPRIDX-NEXT: v_readfirstlane_b32 s29, v31
; GPRIDX-NEXT: v_readfirstlane_b32 s30, v32
; GPRIDX-NEXT: v_readfirstlane_b32 s31, v33
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v16f64_s_v_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_mov_b32 s1, s3
; MOVREL-NEXT: s_mov_b32 s3, s5
; MOVREL-NEXT: s_mov_b32 s5, s7
; MOVREL-NEXT: s_mov_b32 s7, s9
; MOVREL-NEXT: s_mov_b32 s9, s11
; MOVREL-NEXT: s_mov_b32 s11, s13
; MOVREL-NEXT: s_mov_b32 s13, s15
; MOVREL-NEXT: s_mov_b32 s15, s17
; MOVREL-NEXT: s_mov_b32 s17, s19
; MOVREL-NEXT: s_mov_b32 s19, s21
; MOVREL-NEXT: s_mov_b32 s21, s23
; MOVREL-NEXT: s_mov_b32 s23, s25
; MOVREL-NEXT: s_mov_b32 s25, s27
; MOVREL-NEXT: s_mov_b32 s27, s29
; MOVREL-NEXT: s_mov_b32 s29, s31
; MOVREL-NEXT: s_mov_b32 s31, s33
; MOVREL-NEXT: s_mov_b32 s0, s2
; MOVREL-NEXT: s_mov_b32 s2, s4
; MOVREL-NEXT: s_mov_b32 s4, s6
; MOVREL-NEXT: s_mov_b32 s6, s8
; MOVREL-NEXT: s_mov_b32 s8, s10
; MOVREL-NEXT: s_mov_b32 s10, s12
; MOVREL-NEXT: s_mov_b32 s12, s14
; MOVREL-NEXT: s_mov_b32 s14, s16
; MOVREL-NEXT: s_mov_b32 s16, s18
; MOVREL-NEXT: s_mov_b32 s18, s20
; MOVREL-NEXT: s_mov_b32 s20, s22
; MOVREL-NEXT: s_mov_b32 s22, s24
; MOVREL-NEXT: s_mov_b32 s24, s26
; MOVREL-NEXT: s_mov_b32 s26, s28
; MOVREL-NEXT: s_mov_b32 s28, s30
; MOVREL-NEXT: s_mov_b32 s30, s32
; MOVREL-NEXT: v_mov_b32_e32 v33, s31
; MOVREL-NEXT: v_mov_b32_e32 v2, s0
; MOVREL-NEXT: s_lshl_b32 m0, s34, 1
; MOVREL-NEXT: v_mov_b32_e32 v32, s30
; MOVREL-NEXT: v_mov_b32_e32 v31, s29
; MOVREL-NEXT: v_mov_b32_e32 v30, s28
; MOVREL-NEXT: v_mov_b32_e32 v29, s27
; MOVREL-NEXT: v_mov_b32_e32 v28, s26
; MOVREL-NEXT: v_mov_b32_e32 v27, s25
; MOVREL-NEXT: v_mov_b32_e32 v26, s24
; MOVREL-NEXT: v_mov_b32_e32 v25, s23
; MOVREL-NEXT: v_mov_b32_e32 v24, s22
; MOVREL-NEXT: v_mov_b32_e32 v23, s21
; MOVREL-NEXT: v_mov_b32_e32 v22, s20
; MOVREL-NEXT: v_mov_b32_e32 v21, s19
; MOVREL-NEXT: v_mov_b32_e32 v20, s18
; MOVREL-NEXT: v_mov_b32_e32 v19, s17
; MOVREL-NEXT: v_mov_b32_e32 v18, s16
; MOVREL-NEXT: v_mov_b32_e32 v17, s15
; MOVREL-NEXT: v_mov_b32_e32 v16, s14
; MOVREL-NEXT: v_mov_b32_e32 v15, s13
; MOVREL-NEXT: v_mov_b32_e32 v14, s12
; MOVREL-NEXT: v_mov_b32_e32 v13, s11
; MOVREL-NEXT: v_mov_b32_e32 v12, s10
; MOVREL-NEXT: v_mov_b32_e32 v11, s9
; MOVREL-NEXT: v_mov_b32_e32 v10, s8
; MOVREL-NEXT: v_mov_b32_e32 v9, s7
; MOVREL-NEXT: v_mov_b32_e32 v8, s6
; MOVREL-NEXT: v_mov_b32_e32 v7, s5
; MOVREL-NEXT: v_mov_b32_e32 v6, s4
; MOVREL-NEXT: v_mov_b32_e32 v5, s3
; MOVREL-NEXT: v_mov_b32_e32 v4, s2
; MOVREL-NEXT: v_mov_b32_e32 v3, s1
; MOVREL-NEXT: v_movreld_b32_e32 v2, v0
; MOVREL-NEXT: v_movreld_b32_e32 v3, v1
; MOVREL-NEXT: v_readfirstlane_b32 s0, v2
; MOVREL-NEXT: v_readfirstlane_b32 s1, v3
; MOVREL-NEXT: v_readfirstlane_b32 s2, v4
; MOVREL-NEXT: v_readfirstlane_b32 s3, v5
; MOVREL-NEXT: v_readfirstlane_b32 s4, v6
; MOVREL-NEXT: v_readfirstlane_b32 s5, v7
; MOVREL-NEXT: v_readfirstlane_b32 s6, v8
; MOVREL-NEXT: v_readfirstlane_b32 s7, v9
; MOVREL-NEXT: v_readfirstlane_b32 s8, v10
; MOVREL-NEXT: v_readfirstlane_b32 s9, v11
; MOVREL-NEXT: v_readfirstlane_b32 s10, v12
; MOVREL-NEXT: v_readfirstlane_b32 s11, v13
; MOVREL-NEXT: v_readfirstlane_b32 s12, v14
; MOVREL-NEXT: v_readfirstlane_b32 s13, v15
; MOVREL-NEXT: v_readfirstlane_b32 s14, v16
; MOVREL-NEXT: v_readfirstlane_b32 s15, v17
; MOVREL-NEXT: v_readfirstlane_b32 s16, v18
; MOVREL-NEXT: v_readfirstlane_b32 s17, v19
; MOVREL-NEXT: v_readfirstlane_b32 s18, v20
; MOVREL-NEXT: v_readfirstlane_b32 s19, v21
; MOVREL-NEXT: v_readfirstlane_b32 s20, v22
; MOVREL-NEXT: v_readfirstlane_b32 s21, v23
; MOVREL-NEXT: v_readfirstlane_b32 s22, v24
; MOVREL-NEXT: v_readfirstlane_b32 s23, v25
; MOVREL-NEXT: v_readfirstlane_b32 s24, v26
; MOVREL-NEXT: v_readfirstlane_b32 s25, v27
; MOVREL-NEXT: v_readfirstlane_b32 s26, v28
; MOVREL-NEXT: v_readfirstlane_b32 s27, v29
; MOVREL-NEXT: v_readfirstlane_b32 s28, v30
; MOVREL-NEXT: v_readfirstlane_b32 s29, v31
; MOVREL-NEXT: v_readfirstlane_b32 s30, v32
; MOVREL-NEXT: v_readfirstlane_b32 s31, v33
; MOVREL-NEXT: ; return to shader part epilog
entry:
  %insert = insertelement <16 x double> %vec, double %val, i32 %idx
  ret <16 x double> %insert
}
| |
; All-scalar operands with a non-power-of-2 (<7 x i32>) vector: lowered to a
; chain of s_cmp_eq_u32/s_cselect_b32, one compare+select per element.
; Identical code for gfx900 (GPRIDX) and gfx1010 (MOVREL).
define amdgpu_ps <7 x i32> @dyn_insertelement_v7i32_s_s_s(<7 x i32> inreg %vec, i32 inreg %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v7i32_s_s_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_cmp_eq_u32 s10, 0
; GPRIDX-NEXT: s_cselect_b32 s0, s9, s2
; GPRIDX-NEXT: s_cmp_eq_u32 s10, 1
; GPRIDX-NEXT: s_cselect_b32 s1, s9, s3
; GPRIDX-NEXT: s_cmp_eq_u32 s10, 2
; GPRIDX-NEXT: s_cselect_b32 s2, s9, s4
; GPRIDX-NEXT: s_cmp_eq_u32 s10, 3
; GPRIDX-NEXT: s_cselect_b32 s3, s9, s5
; GPRIDX-NEXT: s_cmp_eq_u32 s10, 4
; GPRIDX-NEXT: s_cselect_b32 s4, s9, s6
; GPRIDX-NEXT: s_cmp_eq_u32 s10, 5
; GPRIDX-NEXT: s_cselect_b32 s5, s9, s7
; GPRIDX-NEXT: s_cmp_eq_u32 s10, 6
; GPRIDX-NEXT: s_cselect_b32 s6, s9, s8
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v7i32_s_s_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_cmp_eq_u32 s10, 0
; MOVREL-NEXT: s_cselect_b32 s0, s9, s2
; MOVREL-NEXT: s_cmp_eq_u32 s10, 1
; MOVREL-NEXT: s_cselect_b32 s1, s9, s3
; MOVREL-NEXT: s_cmp_eq_u32 s10, 2
; MOVREL-NEXT: s_cselect_b32 s2, s9, s4
; MOVREL-NEXT: s_cmp_eq_u32 s10, 3
; MOVREL-NEXT: s_cselect_b32 s3, s9, s5
; MOVREL-NEXT: s_cmp_eq_u32 s10, 4
; MOVREL-NEXT: s_cselect_b32 s4, s9, s6
; MOVREL-NEXT: s_cmp_eq_u32 s10, 5
; MOVREL-NEXT: s_cselect_b32 s5, s9, s7
; MOVREL-NEXT: s_cmp_eq_u32 s10, 6
; MOVREL-NEXT: s_cselect_b32 s6, s9, s8
; MOVREL-NEXT: ; return to shader part epilog
entry:
  %insert = insertelement <7 x i32> %vec, i32 %val, i32 %idx
  ret <7 x i32> %insert
}
| |
; Same as the <7 x i32> all-scalar case, but with addrspace(3) pointer
; elements; the generated s_cmp_eq_u32/s_cselect_b32 chain is identical to
; the i32 variant (the checks below match it element for element).
define amdgpu_ps <7 x i8 addrspace(3)*> @dyn_insertelement_v7p3i8_s_s_s(<7 x i8 addrspace(3)*> inreg %vec, i8 addrspace(3)* inreg %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v7p3i8_s_s_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_cmp_eq_u32 s10, 0
; GPRIDX-NEXT: s_cselect_b32 s0, s9, s2
; GPRIDX-NEXT: s_cmp_eq_u32 s10, 1
; GPRIDX-NEXT: s_cselect_b32 s1, s9, s3
; GPRIDX-NEXT: s_cmp_eq_u32 s10, 2
; GPRIDX-NEXT: s_cselect_b32 s2, s9, s4
; GPRIDX-NEXT: s_cmp_eq_u32 s10, 3
; GPRIDX-NEXT: s_cselect_b32 s3, s9, s5
; GPRIDX-NEXT: s_cmp_eq_u32 s10, 4
; GPRIDX-NEXT: s_cselect_b32 s4, s9, s6
; GPRIDX-NEXT: s_cmp_eq_u32 s10, 5
; GPRIDX-NEXT: s_cselect_b32 s5, s9, s7
; GPRIDX-NEXT: s_cmp_eq_u32 s10, 6
; GPRIDX-NEXT: s_cselect_b32 s6, s9, s8
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v7p3i8_s_s_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_cmp_eq_u32 s10, 0
; MOVREL-NEXT: s_cselect_b32 s0, s9, s2
; MOVREL-NEXT: s_cmp_eq_u32 s10, 1
; MOVREL-NEXT: s_cselect_b32 s1, s9, s3
; MOVREL-NEXT: s_cmp_eq_u32 s10, 2
; MOVREL-NEXT: s_cselect_b32 s2, s9, s4
; MOVREL-NEXT: s_cmp_eq_u32 s10, 3
; MOVREL-NEXT: s_cselect_b32 s3, s9, s5
; MOVREL-NEXT: s_cmp_eq_u32 s10, 4
; MOVREL-NEXT: s_cselect_b32 s4, s9, s6
; MOVREL-NEXT: s_cmp_eq_u32 s10, 5
; MOVREL-NEXT: s_cselect_b32 s5, s9, s7
; MOVREL-NEXT: s_cmp_eq_u32 s10, 6
; MOVREL-NEXT: s_cselect_b32 s6, s9, s8
; MOVREL-NEXT: ; return to shader part epilog
entry:
  %insert = insertelement <7 x i8 addrspace(3)*> %vec, i8 addrspace(3)* %val, i32 %idx
  ret <7 x i8 addrspace(3)*> %insert
}
| |
; SGPR vector, VGPR value, SGPR index: the odd-sized vector is expanded to a
; per-element v_cmp_eq_u32 (scalar index vs. element number, result in
; vcc/vcc_lo) followed by v_cndmask_b32 selecting between the original
; element and %val in v0.
define amdgpu_ps <7 x float> @dyn_insertelement_v7f32_s_v_s(<7 x float> inreg %vec, float %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v7f32_s_v_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_mov_b32 s0, s2
; GPRIDX-NEXT: s_mov_b32 s2, s4
; GPRIDX-NEXT: s_mov_b32 s4, s6
; GPRIDX-NEXT: s_mov_b32 s6, s8
; GPRIDX-NEXT: s_mov_b32 s1, s3
; GPRIDX-NEXT: s_mov_b32 s3, s5
; GPRIDX-NEXT: s_mov_b32 s5, s7
; GPRIDX-NEXT: v_mov_b32_e32 v13, s6
; GPRIDX-NEXT: v_mov_b32_e32 v7, s0
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s9, 0
; GPRIDX-NEXT: v_mov_b32_e32 v8, s1
; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s9, 1
; GPRIDX-NEXT: v_mov_b32_e32 v9, s2
; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v8, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s9, 2
; GPRIDX-NEXT: v_mov_b32_e32 v10, s3
; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v9, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s9, 3
; GPRIDX-NEXT: v_mov_b32_e32 v11, s4
; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v10, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s9, 4
; GPRIDX-NEXT: v_mov_b32_e32 v12, s5
; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v11, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s9, 5
; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v12, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s9, 6
; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v13, v0, vcc
; GPRIDX-NEXT: v_mov_b32_e32 v0, v7
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v7f32_s_v_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_mov_b32 s0, s2
; MOVREL-NEXT: s_mov_b32 s2, s4
; MOVREL-NEXT: s_mov_b32 s4, s6
; MOVREL-NEXT: s_mov_b32 s6, s8
; MOVREL-NEXT: s_mov_b32 s1, s3
; MOVREL-NEXT: s_mov_b32 s3, s5
; MOVREL-NEXT: s_mov_b32 s5, s7
; MOVREL-NEXT: v_mov_b32_e32 v13, s6
; MOVREL-NEXT: v_mov_b32_e32 v7, s0
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s9, 0
; MOVREL-NEXT: v_mov_b32_e32 v8, s1
; MOVREL-NEXT: v_mov_b32_e32 v9, s2
; MOVREL-NEXT: v_mov_b32_e32 v10, s3
; MOVREL-NEXT: v_mov_b32_e32 v11, s4
; MOVREL-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s9, 1
; MOVREL-NEXT: v_mov_b32_e32 v12, s5
; MOVREL-NEXT: v_cndmask_b32_e32 v1, v8, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s9, 2
; MOVREL-NEXT: v_cndmask_b32_e32 v2, v9, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s9, 3
; MOVREL-NEXT: v_cndmask_b32_e32 v3, v10, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s9, 4
; MOVREL-NEXT: v_cndmask_b32_e32 v4, v11, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s9, 5
; MOVREL-NEXT: v_cndmask_b32_e32 v5, v12, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s9, 6
; MOVREL-NEXT: v_cndmask_b32_e32 v6, v13, v0, vcc_lo
; MOVREL-NEXT: v_mov_b32_e32 v0, v7
; MOVREL-NEXT: ; return to shader part epilog
entry:
  %insert = insertelement <7 x float> %vec, float %val, i32 %idx
  ret <7 x float> %insert
}
| |
; SGPR vector, VGPR value, VGPR index: like the s_v_s case but the compares
; are v_cmp_eq_u32 against the divergent index in v1, so each element is
; selected per-lane with v_cndmask_b32.
define amdgpu_ps <7 x float> @dyn_insertelement_v7f32_s_v_v(<7 x float> inreg %vec, float %val, i32 %idx) {
; GPRIDX-LABEL: dyn_insertelement_v7f32_s_v_v:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: s_mov_b32 s0, s2
; GPRIDX-NEXT: s_mov_b32 s2, s4
; GPRIDX-NEXT: s_mov_b32 s4, s6
; GPRIDX-NEXT: s_mov_b32 s6, s8
; GPRIDX-NEXT: s_mov_b32 s1, s3
; GPRIDX-NEXT: s_mov_b32 s3, s5
; GPRIDX-NEXT: s_mov_b32 s5, s7
; GPRIDX-NEXT: v_mov_b32_e32 v14, s6
; GPRIDX-NEXT: v_mov_b32_e32 v8, s0
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GPRIDX-NEXT: v_mov_b32_e32 v9, s1
; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v8, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1
; GPRIDX-NEXT: v_mov_b32_e32 v10, s2
; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v9, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v1
; GPRIDX-NEXT: v_mov_b32_e32 v11, s3
; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v10, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v1
; GPRIDX-NEXT: v_mov_b32_e32 v12, s4
; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v11, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v1
; GPRIDX-NEXT: v_mov_b32_e32 v13, s5
; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v12, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v1
; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v13, v0, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v1
; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v14, v0, vcc
; GPRIDX-NEXT: v_mov_b32_e32 v0, v8
; GPRIDX-NEXT: v_mov_b32_e32 v1, v7
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v7f32_s_v_v:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: s_mov_b32 s0, s2
; MOVREL-NEXT: s_mov_b32 s2, s4
; MOVREL-NEXT: s_mov_b32 s4, s6
; MOVREL-NEXT: s_mov_b32 s6, s8
; MOVREL-NEXT: s_mov_b32 s1, s3
; MOVREL-NEXT: s_mov_b32 s3, s5
; MOVREL-NEXT: s_mov_b32 s5, s7
; MOVREL-NEXT: v_mov_b32_e32 v14, s6
; MOVREL-NEXT: v_mov_b32_e32 v8, s0
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
; MOVREL-NEXT: v_mov_b32_e32 v9, s1
; MOVREL-NEXT: v_mov_b32_e32 v10, s2
; MOVREL-NEXT: v_mov_b32_e32 v11, s3
; MOVREL-NEXT: v_mov_b32_e32 v12, s4
; MOVREL-NEXT: v_cndmask_b32_e32 v8, v8, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1
; MOVREL-NEXT: v_mov_b32_e32 v13, s5
; MOVREL-NEXT: v_cndmask_b32_e32 v7, v9, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v1
; MOVREL-NEXT: v_cndmask_b32_e32 v2, v10, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v1
; MOVREL-NEXT: v_cndmask_b32_e32 v3, v11, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v1
; MOVREL-NEXT: v_cndmask_b32_e32 v4, v12, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v1
; MOVREL-NEXT: v_cndmask_b32_e32 v5, v13, v0, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v1
; MOVREL-NEXT: v_mov_b32_e32 v1, v7
; MOVREL-NEXT: v_cndmask_b32_e32 v6, v14, v0, vcc_lo
; MOVREL-NEXT: v_mov_b32_e32 v0, v8
; MOVREL-NEXT: ; return to shader part epilog
entry:
  %insert = insertelement <7 x float> %vec, float %val, i32 %idx
  ret <7 x float> %insert
}
| |
; VGPR vector and value with an SGPR index: the vector already lives in
; v0-v6, so each element is conditionally overwritten in place with
; v_cmp_eq_u32 (s2 vs. element number) + v_cndmask_b32 against %val in v7.
define amdgpu_ps <7 x float> @dyn_insertelement_v7f32_v_v_s(<7 x float> %vec, float %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v7f32_v_v_s:
; GPRIDX: ; %bb.0: ; %entry
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 0
; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 1
; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 2
; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 3
; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 4
; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 5
; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 6
; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
; GPRIDX-NEXT: ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v7f32_v_v_s:
; MOVREL: ; %bb.0: ; %entry
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 0
; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 1
; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 2
; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 3
; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 4
; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 5
; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc_lo
; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 6
; MOVREL-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc_lo
; MOVREL-NEXT: ; return to shader part epilog
entry:
  %insert = insertelement <7 x float> %vec, float %val, i32 %idx
  ret <7 x float> %insert
}
| |
| define amdgpu_ps <7 x float> @dyn_insertelement_v7f32_v_v_v(<7 x float> %vec, float %val, i32 %idx) { |
| ; Same as the _v_v_s case but with a divergent (VGPR) index in v8: still a |
| ; full compare+select chain, comparing v8 against each constant lane number. |
| ; GPRIDX-LABEL: dyn_insertelement_v7f32_v_v_v: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v8 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v8 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v8 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v8 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v8 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v7f32_v_v_v: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v8 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v8 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v8 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v8 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v8 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v8 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v8 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc_lo |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <7 x float> %vec, float %val, i32 %idx |
| ret <7 x float> %insert |
| } |
| |
| define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_s_s_s(<7 x double> inreg %vec, double inreg %val, i32 inreg %idx) { |
| ; Fully uniform <7 x double> insert: the 14 vector SGPRs are shifted down to |
| ; s[0:13], m0 is loaded with the index, and s_movreld_b64 writes the element. |
| ; GPRIDX additionally needs an s_nop between the m0 write and the movreld. |
| ; NOTE(review): m0 takes the raw index (s18) here, while the VGPR movrel |
| ; variants below scale the index by 2 (s_lshl_b32 ..., 1) for 64-bit elements |
| ; -- confirm the expected index granularity of s_movreld_b64. |
| ; GPRIDX-LABEL: dyn_insertelement_v7f64_s_s_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_mov_b32 s0, s2 |
| ; GPRIDX-NEXT: s_mov_b32 s1, s3 |
| ; GPRIDX-NEXT: s_mov_b32 s2, s4 |
| ; GPRIDX-NEXT: s_mov_b32 s3, s5 |
| ; GPRIDX-NEXT: s_mov_b32 s4, s6 |
| ; GPRIDX-NEXT: s_mov_b32 s5, s7 |
| ; GPRIDX-NEXT: s_mov_b32 s6, s8 |
| ; GPRIDX-NEXT: s_mov_b32 s7, s9 |
| ; GPRIDX-NEXT: s_mov_b32 s8, s10 |
| ; GPRIDX-NEXT: s_mov_b32 s9, s11 |
| ; GPRIDX-NEXT: s_mov_b32 s10, s12 |
| ; GPRIDX-NEXT: s_mov_b32 s11, s13 |
| ; GPRIDX-NEXT: s_mov_b32 s12, s14 |
| ; GPRIDX-NEXT: s_mov_b32 s13, s15 |
| ; GPRIDX-NEXT: s_mov_b32 m0, s18 |
| ; GPRIDX-NEXT: s_nop 0 |
| ; GPRIDX-NEXT: s_movreld_b64 s[0:1], s[16:17] |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v7f64_s_s_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_mov_b32 s0, s2 |
| ; MOVREL-NEXT: s_mov_b32 s1, s3 |
| ; MOVREL-NEXT: s_mov_b32 m0, s18 |
| ; MOVREL-NEXT: s_mov_b32 s2, s4 |
| ; MOVREL-NEXT: s_mov_b32 s3, s5 |
| ; MOVREL-NEXT: s_mov_b32 s4, s6 |
| ; MOVREL-NEXT: s_mov_b32 s5, s7 |
| ; MOVREL-NEXT: s_mov_b32 s6, s8 |
| ; MOVREL-NEXT: s_mov_b32 s7, s9 |
| ; MOVREL-NEXT: s_mov_b32 s8, s10 |
| ; MOVREL-NEXT: s_mov_b32 s9, s11 |
| ; MOVREL-NEXT: s_mov_b32 s10, s12 |
| ; MOVREL-NEXT: s_mov_b32 s11, s13 |
| ; MOVREL-NEXT: s_mov_b32 s12, s14 |
| ; MOVREL-NEXT: s_mov_b32 s13, s15 |
| ; MOVREL-NEXT: s_movreld_b64 s[0:1], s[16:17] |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <7 x double> %vec, double %val, i32 %idx |
| ret <7 x double> %insert |
| } |
| |
| define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_s_v_s(<7 x double> inreg %vec, double %val, i32 inreg %idx) { |
| ; SGPR vector, VGPR value, uniform index: the vector is broadcast into VGPRs, |
| ; the index is scaled by 2 (two dwords per double), the indexed write uses |
| ; s_set_gpr_idx_on in DST mode (gfx900) or m0 + v_movreld_b32 (gfx1010), and |
| ; the result is moved back to SGPRs with v_readfirstlane for the return. |
| ; GPRIDX-LABEL: dyn_insertelement_v7f64_s_v_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_mov_b32 s0, s2 |
| ; GPRIDX-NEXT: s_mov_b32 s1, s3 |
| ; GPRIDX-NEXT: s_mov_b32 s2, s4 |
| ; GPRIDX-NEXT: s_mov_b32 s3, s5 |
| ; GPRIDX-NEXT: s_mov_b32 s4, s6 |
| ; GPRIDX-NEXT: s_mov_b32 s5, s7 |
| ; GPRIDX-NEXT: s_mov_b32 s6, s8 |
| ; GPRIDX-NEXT: s_mov_b32 s7, s9 |
| ; GPRIDX-NEXT: s_mov_b32 s8, s10 |
| ; GPRIDX-NEXT: s_mov_b32 s9, s11 |
| ; GPRIDX-NEXT: s_mov_b32 s10, s12 |
| ; GPRIDX-NEXT: s_mov_b32 s11, s13 |
| ; GPRIDX-NEXT: s_mov_b32 s12, s14 |
| ; GPRIDX-NEXT: s_mov_b32 s13, s15 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v17, s15 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v16, s14 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v15, s13 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v14, s12 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v13, s11 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v12, s10 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v11, s9 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v10, s8 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v9, s7 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v8, s6 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v7, s5 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v6, s4 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v5, s3 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v4, s2 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v3, s1 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v2, s0 |
| ; GPRIDX-NEXT: s_lshl_b32 s0, s16, 1 |
| ; GPRIDX-NEXT: s_set_gpr_idx_on s0, gpr_idx(DST) |
| ; GPRIDX-NEXT: v_mov_b32_e32 v2, v0 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v3, v1 |
| ; GPRIDX-NEXT: s_set_gpr_idx_off |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s0, v2 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s1, v3 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s2, v4 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s3, v5 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s4, v6 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s5, v7 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s6, v8 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s7, v9 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s8, v10 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s9, v11 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s10, v12 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s11, v13 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s12, v14 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s13, v15 |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v7f64_s_v_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_mov_b32 s0, s2 |
| ; MOVREL-NEXT: s_mov_b32 s1, s3 |
| ; MOVREL-NEXT: s_mov_b32 s2, s4 |
| ; MOVREL-NEXT: s_mov_b32 s3, s5 |
| ; MOVREL-NEXT: s_mov_b32 s4, s6 |
| ; MOVREL-NEXT: s_mov_b32 s5, s7 |
| ; MOVREL-NEXT: s_mov_b32 s6, s8 |
| ; MOVREL-NEXT: s_mov_b32 s7, s9 |
| ; MOVREL-NEXT: s_mov_b32 s8, s10 |
| ; MOVREL-NEXT: s_mov_b32 s9, s11 |
| ; MOVREL-NEXT: s_mov_b32 s10, s12 |
| ; MOVREL-NEXT: s_mov_b32 s11, s13 |
| ; MOVREL-NEXT: s_mov_b32 s12, s14 |
| ; MOVREL-NEXT: s_mov_b32 s13, s15 |
| ; MOVREL-NEXT: v_mov_b32_e32 v17, s15 |
| ; MOVREL-NEXT: v_mov_b32_e32 v2, s0 |
| ; MOVREL-NEXT: s_lshl_b32 m0, s16, 1 |
| ; MOVREL-NEXT: v_mov_b32_e32 v16, s14 |
| ; MOVREL-NEXT: v_mov_b32_e32 v15, s13 |
| ; MOVREL-NEXT: v_mov_b32_e32 v14, s12 |
| ; MOVREL-NEXT: v_mov_b32_e32 v13, s11 |
| ; MOVREL-NEXT: v_mov_b32_e32 v12, s10 |
| ; MOVREL-NEXT: v_mov_b32_e32 v11, s9 |
| ; MOVREL-NEXT: v_mov_b32_e32 v10, s8 |
| ; MOVREL-NEXT: v_mov_b32_e32 v9, s7 |
| ; MOVREL-NEXT: v_mov_b32_e32 v8, s6 |
| ; MOVREL-NEXT: v_mov_b32_e32 v7, s5 |
| ; MOVREL-NEXT: v_mov_b32_e32 v6, s4 |
| ; MOVREL-NEXT: v_mov_b32_e32 v5, s3 |
| ; MOVREL-NEXT: v_mov_b32_e32 v4, s2 |
| ; MOVREL-NEXT: v_mov_b32_e32 v3, s1 |
| ; MOVREL-NEXT: v_movreld_b32_e32 v2, v0 |
| ; MOVREL-NEXT: v_movreld_b32_e32 v3, v1 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s0, v2 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s1, v3 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s2, v4 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s3, v5 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s4, v6 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s5, v7 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s6, v8 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s7, v9 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s8, v10 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s9, v11 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s10, v12 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s11, v13 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s12, v14 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s13, v15 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <7 x double> %vec, double %val, i32 %idx |
| ret <7 x double> %insert |
| } |
| |
| define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_s_v_v(<7 x double> inreg %vec, double %val, i32 %idx) { |
| ; SGPR vector, VGPR value, divergent (VGPR) index: no movrel is possible, so |
| ; the vector is broadcast into VGPRs and each 64-bit lane is selected with a |
| ; v_cmp_eq/v_cndmask pair (low and high dword separately), then the result is |
| ; read back to SGPRs with v_readfirstlane. |
| ; GPRIDX-LABEL: dyn_insertelement_v7f64_s_v_v: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_mov_b32 s0, s2 |
| ; GPRIDX-NEXT: s_mov_b32 s1, s3 |
| ; GPRIDX-NEXT: s_mov_b32 s2, s4 |
| ; GPRIDX-NEXT: s_mov_b32 s3, s5 |
| ; GPRIDX-NEXT: s_mov_b32 s4, s6 |
| ; GPRIDX-NEXT: s_mov_b32 s5, s7 |
| ; GPRIDX-NEXT: s_mov_b32 s6, s8 |
| ; GPRIDX-NEXT: s_mov_b32 s7, s9 |
| ; GPRIDX-NEXT: s_mov_b32 s8, s10 |
| ; GPRIDX-NEXT: s_mov_b32 s9, s11 |
| ; GPRIDX-NEXT: s_mov_b32 s10, s12 |
| ; GPRIDX-NEXT: s_mov_b32 s11, s13 |
| ; GPRIDX-NEXT: s_mov_b32 s12, s14 |
| ; GPRIDX-NEXT: s_mov_b32 s13, s15 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v18, s15 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v17, s14 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v16, s13 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v15, s12 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v14, s11 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v13, s10 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v12, s9 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v11, s8 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v10, s7 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v9, s6 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v8, s5 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v7, s4 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v6, s3 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v5, s2 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v4, s1 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v3, s0 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v2 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v2 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], 4, v2 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], 5, v2 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[8:9], 6, v2 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[10:11], 1, v2 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v2, v5, v0, s[10:11] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v5, v7, v0, s[0:1] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v9, v0, s[2:3] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v11, v0, s[4:5] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v11, v13, v0, s[6:7] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v0, v15, v0, s[8:9] |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v1, vcc |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v6, v6, v1, s[10:11] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v1, s[0:1] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v10, v10, v1, s[2:3] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v12, v12, v1, s[4:5] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v13, v14, v1, s[6:7] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v1, v16, v1, s[8:9] |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s0, v3 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s1, v4 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s2, v2 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s3, v6 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s4, v5 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s5, v8 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s6, v7 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s7, v10 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s8, v9 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s9, v12 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s10, v11 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s11, v13 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s12, v0 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s13, v1 |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v7f64_s_v_v: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_mov_b32 s0, s2 |
| ; MOVREL-NEXT: s_mov_b32 s1, s3 |
| ; MOVREL-NEXT: s_mov_b32 s2, s4 |
| ; MOVREL-NEXT: s_mov_b32 s3, s5 |
| ; MOVREL-NEXT: s_mov_b32 s4, s6 |
| ; MOVREL-NEXT: s_mov_b32 s5, s7 |
| ; MOVREL-NEXT: s_mov_b32 s6, s8 |
| ; MOVREL-NEXT: s_mov_b32 s7, s9 |
| ; MOVREL-NEXT: s_mov_b32 s8, s10 |
| ; MOVREL-NEXT: s_mov_b32 s9, s11 |
| ; MOVREL-NEXT: s_mov_b32 s10, s12 |
| ; MOVREL-NEXT: s_mov_b32 s11, s13 |
| ; MOVREL-NEXT: s_mov_b32 s12, s14 |
| ; MOVREL-NEXT: s_mov_b32 s13, s15 |
| ; MOVREL-NEXT: v_mov_b32_e32 v18, s15 |
| ; MOVREL-NEXT: v_mov_b32_e32 v17, s14 |
| ; MOVREL-NEXT: v_mov_b32_e32 v16, s13 |
| ; MOVREL-NEXT: v_mov_b32_e32 v15, s12 |
| ; MOVREL-NEXT: v_mov_b32_e32 v14, s11 |
| ; MOVREL-NEXT: v_mov_b32_e32 v13, s10 |
| ; MOVREL-NEXT: v_mov_b32_e32 v12, s9 |
| ; MOVREL-NEXT: v_mov_b32_e32 v11, s8 |
| ; MOVREL-NEXT: v_mov_b32_e32 v10, s7 |
| ; MOVREL-NEXT: v_mov_b32_e32 v9, s6 |
| ; MOVREL-NEXT: v_mov_b32_e32 v8, s5 |
| ; MOVREL-NEXT: v_mov_b32_e32 v7, s4 |
| ; MOVREL-NEXT: v_mov_b32_e32 v6, s3 |
| ; MOVREL-NEXT: v_mov_b32_e32 v5, s2 |
| ; MOVREL-NEXT: v_mov_b32_e32 v4, s1 |
| ; MOVREL-NEXT: v_mov_b32_e32 v3, s0 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, 1, v2 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s1, 6, v2 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v1, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v2 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v5, v5, v0, s0 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v6, v6, v1, s0 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, 3, v2 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v8, v8, v1, vcc_lo |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v2 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v9, v9, v0, s0 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v10, v10, v1, s0 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, 5, v2 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s2, v5 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v11, v11, v0, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v2, v12, v1, vcc_lo |
| ; MOVREL-NEXT: v_readfirstlane_b32 s3, v6 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v12, v13, v0, s0 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v13, v14, v1, s0 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v0, v15, v0, s1 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v1, v16, v1, s1 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s0, v3 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s1, v4 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s4, v7 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s5, v8 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s6, v9 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s7, v10 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s8, v11 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s9, v2 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s10, v12 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s11, v13 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s12, v0 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s13, v1 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <7 x double> %vec, double %val, i32 %idx |
| ret <7 x double> %insert |
| } |
| |
| define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_v_v_s(<7 x double> %vec, double %val, i32 inreg %idx) { |
| ; VGPR vector, VGPR value, uniform index: the two dwords of %val (v14, and |
| ; v15 copied to v16 first) are written with an indexed move -- |
| ; s_set_gpr_idx_on in DST mode on gfx900, m0 + v_movreld_b32 on gfx1010 -- |
| ; with the index scaled by 2 for the 64-bit element size. |
| ; GPRIDX-LABEL: dyn_insertelement_v7f64_v_v_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_lshl_b32 s0, s2, 1 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v16, v15 |
| ; GPRIDX-NEXT: s_set_gpr_idx_on s0, gpr_idx(DST) |
| ; GPRIDX-NEXT: v_mov_b32_e32 v0, v14 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v1, v16 |
| ; GPRIDX-NEXT: s_set_gpr_idx_off |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s0, v0 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s1, v1 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s2, v2 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s3, v3 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s4, v4 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s5, v5 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s6, v6 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s7, v7 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s8, v8 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s9, v9 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s10, v10 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s11, v11 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s12, v12 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s13, v13 |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v7f64_v_v_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: v_mov_b32_e32 v16, v15 |
| ; MOVREL-NEXT: s_lshl_b32 m0, s2, 1 |
| ; MOVREL-NEXT: v_movreld_b32_e32 v0, v14 |
| ; MOVREL-NEXT: v_movreld_b32_e32 v1, v16 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s0, v0 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s1, v1 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s2, v2 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s3, v3 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s4, v4 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s5, v5 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s6, v6 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s7, v7 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s8, v8 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s9, v9 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s10, v10 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s11, v11 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s12, v12 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s13, v13 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <7 x double> %vec, double %val, i32 %idx |
| ret <7 x double> %insert |
| } |
| |
| define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_v_v_v(<7 x double> %vec, double %val, i32 %idx) { |
| ; All operands divergent: each of the 7 double lanes is selected with a pair |
| ; of v_cndmask_b32 (low/high dword) under a per-lane v_cmp_eq condition held |
| ; in a separate condition register, then read back to SGPRs for the return. |
| ; GPRIDX-LABEL: dyn_insertelement_v7f64_v_v_v: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v16 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v16 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[2:3], 2, v16 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], 3, v16 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], 4, v16 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[8:9], 5, v16 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[10:11], 6, v16 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v14, vcc |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v2, v2, v14, s[0:1] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v4, v4, v14, s[2:3] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v6, v6, v14, s[4:5] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v14, s[6:7] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v10, v10, v14, s[8:9] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v12, v12, v14, s[10:11] |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v15, vcc |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v3, v3, v15, s[0:1] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v5, v5, v15, s[2:3] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v7, v15, s[4:5] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v9, v15, s[6:7] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v11, v11, v15, s[8:9] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v13, v13, v15, s[10:11] |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s0, v0 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s1, v1 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s2, v2 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s3, v3 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s4, v4 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s5, v5 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s6, v6 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s7, v7 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s8, v8 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s9, v9 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s10, v10 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s11, v11 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s12, v12 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s13, v13 |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v7f64_v_v_v: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v16 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, 1, v16 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s1, 2, v16 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s2, 3, v16 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s3, 4, v16 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s4, 5, v16 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s5, 6, v16 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v14, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v2, v2, v14, s0 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v4, v4, v14, s1 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v6, v6, v14, s2 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v8, v8, v14, s3 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v10, v10, v14, s4 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v12, v12, v14, s5 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v15, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v3, v3, v15, s0 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v5, v5, v15, s1 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v7, v7, v15, s2 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v9, v9, v15, s3 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v11, v11, v15, s4 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v13, v13, v15, s5 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s0, v0 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s1, v1 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s2, v2 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s3, v3 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s4, v4 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s5, v5 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s6, v6 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s7, v7 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s8, v8 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s9, v9 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s10, v10 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s11, v11 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s12, v12 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s13, v13 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <7 x double> %vec, double %val, i32 %idx |
| ret <7 x double> %insert |
| } |
| |
| define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_s_s_s(<5 x double> inreg %vec, double inreg %val, i32 inreg %idx) { |
| ; Fully uniform <5 x double> insert stays entirely scalar on both targets: an |
| ; s_cmp_eq_u32 + s_cselect_b64 chain picks %val (s[12:13]) or the original |
| ; 64-bit element for each of the 5 lanes; no movrel/m0 is used here. |
| ; GPRIDX-LABEL: dyn_insertelement_v5f64_s_s_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s14, 0 |
| ; GPRIDX-NEXT: s_cselect_b64 s[0:1], s[12:13], s[2:3] |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s14, 1 |
| ; GPRIDX-NEXT: s_cselect_b64 s[2:3], s[12:13], s[4:5] |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s14, 2 |
| ; GPRIDX-NEXT: s_cselect_b64 s[4:5], s[12:13], s[6:7] |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s14, 3 |
| ; GPRIDX-NEXT: s_cselect_b64 s[6:7], s[12:13], s[8:9] |
| ; GPRIDX-NEXT: s_cmp_eq_u32 s14, 4 |
| ; GPRIDX-NEXT: s_cselect_b64 s[8:9], s[12:13], s[10:11] |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v5f64_s_s_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_cmp_eq_u32 s14, 0 |
| ; MOVREL-NEXT: s_cselect_b64 s[0:1], s[12:13], s[2:3] |
| ; MOVREL-NEXT: s_cmp_eq_u32 s14, 1 |
| ; MOVREL-NEXT: s_cselect_b64 s[2:3], s[12:13], s[4:5] |
| ; MOVREL-NEXT: s_cmp_eq_u32 s14, 2 |
| ; MOVREL-NEXT: s_cselect_b64 s[4:5], s[12:13], s[6:7] |
| ; MOVREL-NEXT: s_cmp_eq_u32 s14, 3 |
| ; MOVREL-NEXT: s_cselect_b64 s[6:7], s[12:13], s[8:9] |
| ; MOVREL-NEXT: s_cmp_eq_u32 s14, 4 |
| ; MOVREL-NEXT: s_cselect_b64 s[8:9], s[12:13], s[10:11] |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <5 x double> %vec, double %val, i32 %idx |
| ret <5 x double> %insert |
| } |
| |
| define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_s_v_s(<5 x double> inreg %vec, double %val, i32 inreg %idx) { |
| ; SGPR vector, VGPR value, uniform index: unlike the uniform v7f64 case this |
| ; is not lowered with movrel/gpr_idx; the vector is broadcast into VGPRs and |
| ; each 64-bit lane is selected by comparing the scalar index (s12) against |
| ; the lane number, then read back to SGPRs with v_readfirstlane. |
| ; GPRIDX-LABEL: dyn_insertelement_v5f64_s_v_s: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_mov_b32 s0, s2 |
| ; GPRIDX-NEXT: s_mov_b32 s1, s3 |
| ; GPRIDX-NEXT: s_mov_b32 s2, s4 |
| ; GPRIDX-NEXT: s_mov_b32 s3, s5 |
| ; GPRIDX-NEXT: s_mov_b32 s4, s6 |
| ; GPRIDX-NEXT: s_mov_b32 s5, s7 |
| ; GPRIDX-NEXT: s_mov_b32 s6, s8 |
| ; GPRIDX-NEXT: s_mov_b32 s7, s9 |
| ; GPRIDX-NEXT: s_mov_b32 s8, s10 |
| ; GPRIDX-NEXT: s_mov_b32 s9, s11 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v17, s15 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v16, s14 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v15, s13 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v14, s12 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v13, s11 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v12, s10 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v11, s9 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v10, s8 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v9, s7 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v8, s6 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v7, s5 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v6, s4 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v5, s3 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v4, s2 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v3, s1 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v2, s0 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s12, 0 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[0:1], s12, 1 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[2:3], s12, 3 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], s12, 4 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], s12, 2 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v4, v4, v0, s[0:1] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v6, v6, v0, s[6:7] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v0, s[2:3] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v0, v10, v0, s[4:5] |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v1, vcc |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v5, v5, v1, s[0:1] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v7, v1, s[6:7] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v9, v1, s[2:3] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v1, v11, v1, s[4:5] |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s0, v2 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s1, v3 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s2, v4 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s3, v5 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s4, v6 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s5, v7 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s6, v8 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s7, v9 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s8, v0 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s9, v1 |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v5f64_s_v_s: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_mov_b32 s0, s2 |
| ; MOVREL-NEXT: s_mov_b32 s1, s3 |
| ; MOVREL-NEXT: s_mov_b32 s2, s4 |
| ; MOVREL-NEXT: s_mov_b32 s3, s5 |
| ; MOVREL-NEXT: s_mov_b32 s4, s6 |
| ; MOVREL-NEXT: s_mov_b32 s5, s7 |
| ; MOVREL-NEXT: s_mov_b32 s6, s8 |
| ; MOVREL-NEXT: s_mov_b32 s7, s9 |
| ; MOVREL-NEXT: s_mov_b32 s8, s10 |
| ; MOVREL-NEXT: s_mov_b32 s9, s11 |
| ; MOVREL-NEXT: v_mov_b32_e32 v17, s15 |
| ; MOVREL-NEXT: v_mov_b32_e32 v16, s14 |
| ; MOVREL-NEXT: v_mov_b32_e32 v15, s13 |
| ; MOVREL-NEXT: v_mov_b32_e32 v14, s12 |
| ; MOVREL-NEXT: v_mov_b32_e32 v13, s11 |
| ; MOVREL-NEXT: v_mov_b32_e32 v12, s10 |
| ; MOVREL-NEXT: v_mov_b32_e32 v11, s9 |
| ; MOVREL-NEXT: v_mov_b32_e32 v10, s8 |
| ; MOVREL-NEXT: v_mov_b32_e32 v9, s7 |
| ; MOVREL-NEXT: v_mov_b32_e32 v8, s6 |
| ; MOVREL-NEXT: v_mov_b32_e32 v7, s5 |
| ; MOVREL-NEXT: v_mov_b32_e32 v6, s4 |
| ; MOVREL-NEXT: v_mov_b32_e32 v5, s3 |
| ; MOVREL-NEXT: v_mov_b32_e32 v4, s2 |
| ; MOVREL-NEXT: v_mov_b32_e32 v3, s1 |
| ; MOVREL-NEXT: v_mov_b32_e32 v2, s0 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s12, 0 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, s12, 1 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s1, s12, 4 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v1, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v4, v4, v0, s0 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 vcc_lo, s12, 2 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v5, v5, v1, s0 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, s12, 3 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s2, v4 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v7, v7, v1, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v8, v8, v0, s0 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v9, v9, v1, s0 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v0, v10, v0, s1 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v1, v11, v1, s1 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s0, v2 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s1, v3 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s3, v5 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s4, v6 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s5, v7 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s6, v8 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s7, v9 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s8, v0 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s9, v1 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <5 x double> %vec, double %val, i32 %idx |
| ret <5 x double> %insert |
| } |
| |
| define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_s_v_v(<5 x double> inreg %vec, double %val, i32 %idx) { |
| ; SGPR vector, VGPR value, divergent (VGPR) index: the vector is broadcast to |
| ; VGPRs and each of the 5 double lanes is selected with per-lane v_cmp_eq / |
| ; v_cndmask pairs (both dwords), then v_readfirstlane moves the result back |
| ; to the SGPR return registers. |
| ; GPRIDX-LABEL: dyn_insertelement_v5f64_s_v_v: |
| ; GPRIDX: ; %bb.0: ; %entry |
| ; GPRIDX-NEXT: s_mov_b32 s0, s2 |
| ; GPRIDX-NEXT: s_mov_b32 s1, s3 |
| ; GPRIDX-NEXT: s_mov_b32 s2, s4 |
| ; GPRIDX-NEXT: s_mov_b32 s3, s5 |
| ; GPRIDX-NEXT: s_mov_b32 s4, s6 |
| ; GPRIDX-NEXT: s_mov_b32 s5, s7 |
| ; GPRIDX-NEXT: s_mov_b32 s6, s8 |
| ; GPRIDX-NEXT: s_mov_b32 s7, s9 |
| ; GPRIDX-NEXT: s_mov_b32 s8, s10 |
| ; GPRIDX-NEXT: s_mov_b32 s9, s11 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v18, s15 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v17, s14 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v16, s13 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v15, s12 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v14, s11 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v13, s10 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v12, s9 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v11, s8 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v10, s7 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v9, s6 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v8, s5 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v7, s4 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v6, s3 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v5, s2 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v4, s1 |
| ; GPRIDX-NEXT: v_mov_b32_e32 v3, s0 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v2 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v2 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], 4, v2 |
| ; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], 1, v2 |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v2, v5, v0, s[6:7] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v5, v7, v0, s[0:1] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v9, v0, s[2:3] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v0, v11, v0, s[4:5] |
| ; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v1, vcc |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v6, v6, v1, s[6:7] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v1, s[0:1] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v10, v1, s[2:3] |
| ; GPRIDX-NEXT: v_cndmask_b32_e64 v1, v12, v1, s[4:5] |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s0, v3 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s1, v4 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s2, v2 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s3, v6 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s4, v5 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s5, v8 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s6, v7 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s7, v9 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s8, v0 |
| ; GPRIDX-NEXT: v_readfirstlane_b32 s9, v1 |
| ; GPRIDX-NEXT: ; return to shader part epilog |
| ; |
| ; MOVREL-LABEL: dyn_insertelement_v5f64_s_v_v: |
| ; MOVREL: ; %bb.0: ; %entry |
| ; MOVREL-NEXT: s_mov_b32 s0, s2 |
| ; MOVREL-NEXT: s_mov_b32 s1, s3 |
| ; MOVREL-NEXT: s_mov_b32 s2, s4 |
| ; MOVREL-NEXT: s_mov_b32 s3, s5 |
| ; MOVREL-NEXT: s_mov_b32 s4, s6 |
| ; MOVREL-NEXT: s_mov_b32 s5, s7 |
| ; MOVREL-NEXT: s_mov_b32 s6, s8 |
| ; MOVREL-NEXT: s_mov_b32 s7, s9 |
| ; MOVREL-NEXT: s_mov_b32 s8, s10 |
| ; MOVREL-NEXT: s_mov_b32 s9, s11 |
| ; MOVREL-NEXT: v_mov_b32_e32 v18, s15 |
| ; MOVREL-NEXT: v_mov_b32_e32 v17, s14 |
| ; MOVREL-NEXT: v_mov_b32_e32 v16, s13 |
| ; MOVREL-NEXT: v_mov_b32_e32 v15, s12 |
| ; MOVREL-NEXT: v_mov_b32_e32 v14, s11 |
| ; MOVREL-NEXT: v_mov_b32_e32 v13, s10 |
| ; MOVREL-NEXT: v_mov_b32_e32 v12, s9 |
| ; MOVREL-NEXT: v_mov_b32_e32 v11, s8 |
| ; MOVREL-NEXT: v_mov_b32_e32 v10, s7 |
| ; MOVREL-NEXT: v_mov_b32_e32 v9, s6 |
| ; MOVREL-NEXT: v_mov_b32_e32 v8, s5 |
| ; MOVREL-NEXT: v_mov_b32_e32 v7, s4 |
| ; MOVREL-NEXT: v_mov_b32_e32 v6, s3 |
| ; MOVREL-NEXT: v_mov_b32_e32 v5, s2 |
| ; MOVREL-NEXT: v_mov_b32_e32 v4, s1 |
| ; MOVREL-NEXT: v_mov_b32_e32 v3, s0 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, 1, v2 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s1, 4, v2 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v1, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v5, v5, v0, s0 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v2 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v6, v6, v1, s0 |
| ; MOVREL-NEXT: v_cmp_eq_u32_e64 s0, 3, v2 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s2, v5 |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e32 v2, v8, v1, vcc_lo |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v8, v9, v0, s0 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v9, v10, v1, s0 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v0, v11, v0, s1 |
| ; MOVREL-NEXT: v_cndmask_b32_e64 v1, v12, v1, s1 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s0, v3 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s1, v4 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s3, v6 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s4, v7 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s5, v2 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s6, v8 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s7, v9 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s8, v0 |
| ; MOVREL-NEXT: v_readfirstlane_b32 s9, v1 |
| ; MOVREL-NEXT: ; return to shader part epilog |
| entry: |
| %insert = insertelement <5 x double> %vec, double %val, i32 %idx |
| ret <5 x double> %insert |
| } |
| |
; Dynamic insertelement into a <5 x double>: vector and inserted value are in
; VGPRs, the index is a uniform SGPR (inreg, arriving in s2). The checks below
; show the expected lowering: a v_cmp_eq_u32 of s2 against each constant lane
; index 0..4, then per-lane v_cndmask selects for the low/high 32-bit halves of
; each double, and finally v_readfirstlane copies of the results into s0-s9 for
; the shader return. NOTE(review): assertions are autogenerated by
; update_llc_test_checks.py — regenerate rather than hand-edit the CHECK lines.
define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_v_v_s(<5 x double> %vec, double %val, i32 inreg %idx) {
; GPRIDX-LABEL: dyn_insertelement_v5f64_v_v_s:
; GPRIDX:       ; %bb.0: ; %entry
; GPRIDX-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[0:1], s2, 1
; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[8:9], s2, 2
; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], s2, 3
; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], s2, 4
; GPRIDX-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc
; GPRIDX-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s[0:1]
; GPRIDX-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s[8:9]
; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s[4:5]
; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s[6:7]
; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
; GPRIDX-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s[0:1]
; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v5, v11, s[8:9]
; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v11, s[4:5]
; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s[6:7]
; GPRIDX-NEXT:    v_readfirstlane_b32 s0, v0
; GPRIDX-NEXT:    v_readfirstlane_b32 s1, v1
; GPRIDX-NEXT:    v_readfirstlane_b32 s2, v2
; GPRIDX-NEXT:    v_readfirstlane_b32 s3, v3
; GPRIDX-NEXT:    v_readfirstlane_b32 s4, v4
; GPRIDX-NEXT:    v_readfirstlane_b32 s5, v5
; GPRIDX-NEXT:    v_readfirstlane_b32 s6, v6
; GPRIDX-NEXT:    v_readfirstlane_b32 s7, v7
; GPRIDX-NEXT:    v_readfirstlane_b32 s8, v8
; GPRIDX-NEXT:    v_readfirstlane_b32 s9, v9
; GPRIDX-NEXT:    ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v5f64_v_v_s:
; MOVREL:       ; %bb.0: ; %entry
; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 0
; MOVREL-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc_lo
; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc_lo
; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 1
; MOVREL-NEXT:    v_readfirstlane_b32 s0, v0
; MOVREL-NEXT:    v_readfirstlane_b32 s1, v1
; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc_lo
; MOVREL-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc_lo
; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 2
; MOVREL-NEXT:    v_readfirstlane_b32 s3, v3
; MOVREL-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc_lo
; MOVREL-NEXT:    v_cndmask_b32_e32 v5, v5, v11, vcc_lo
; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 3
; MOVREL-NEXT:    v_readfirstlane_b32 s4, v4
; MOVREL-NEXT:    v_readfirstlane_b32 s5, v5
; MOVREL-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc_lo
; MOVREL-NEXT:    v_cndmask_b32_e32 v7, v7, v11, vcc_lo
; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 4
; MOVREL-NEXT:    v_readfirstlane_b32 s2, v2
; MOVREL-NEXT:    v_readfirstlane_b32 s6, v6
; MOVREL-NEXT:    v_readfirstlane_b32 s7, v7
; MOVREL-NEXT:    v_cndmask_b32_e32 v8, v8, v10, vcc_lo
; MOVREL-NEXT:    v_cndmask_b32_e32 v9, v9, v11, vcc_lo
; MOVREL-NEXT:    v_readfirstlane_b32 s8, v8
; MOVREL-NEXT:    v_readfirstlane_b32 s9, v9
; MOVREL-NEXT:    ; return to shader part epilog
entry:
  %insert = insertelement <5 x double> %vec, double %val, i32 %idx
  ret <5 x double> %insert
}
| |
; Dynamic insertelement into a <5 x double> where everything — vector, value,
; and index — is in VGPRs (index arrives in v12). The checks below show the
; expected lowering: v_cmp_eq_u32 of v12 against each constant lane index 0..4
; (GPRIDX keeps all five masks live in 64-bit SGPR pairs; MOVREL/wave32 reuses
; vcc_lo sequentially), per-lane v_cndmask selects for the low/high halves of
; each double, and v_readfirstlane copies into s0-s9 for the shader return.
; NOTE(review): assertions are autogenerated by update_llc_test_checks.py —
; regenerate rather than hand-edit the CHECK lines.
define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_v_v_v(<5 x double> %vec, double %val, i32 %idx) {
; GPRIDX-LABEL: dyn_insertelement_v5f64_v_v_v:
; GPRIDX:       ; %bb.0: ; %entry
; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v12
; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[0:1], 1, v12
; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[2:3], 2, v12
; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], 3, v12
; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], 4, v12
; GPRIDX-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc
; GPRIDX-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s[0:1]
; GPRIDX-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s[2:3]
; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s[4:5]
; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s[6:7]
; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
; GPRIDX-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s[0:1]
; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v5, v11, s[2:3]
; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v11, s[4:5]
; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s[6:7]
; GPRIDX-NEXT:    v_readfirstlane_b32 s0, v0
; GPRIDX-NEXT:    v_readfirstlane_b32 s1, v1
; GPRIDX-NEXT:    v_readfirstlane_b32 s2, v2
; GPRIDX-NEXT:    v_readfirstlane_b32 s3, v3
; GPRIDX-NEXT:    v_readfirstlane_b32 s4, v4
; GPRIDX-NEXT:    v_readfirstlane_b32 s5, v5
; GPRIDX-NEXT:    v_readfirstlane_b32 s6, v6
; GPRIDX-NEXT:    v_readfirstlane_b32 s7, v7
; GPRIDX-NEXT:    v_readfirstlane_b32 s8, v8
; GPRIDX-NEXT:    v_readfirstlane_b32 s9, v9
; GPRIDX-NEXT:    ; return to shader part epilog
;
; MOVREL-LABEL: dyn_insertelement_v5f64_v_v_v:
; MOVREL:       ; %bb.0: ; %entry
; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v12
; MOVREL-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc_lo
; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc_lo
; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v12
; MOVREL-NEXT:    v_readfirstlane_b32 s0, v0
; MOVREL-NEXT:    v_readfirstlane_b32 s1, v1
; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc_lo
; MOVREL-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc_lo
; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v12
; MOVREL-NEXT:    v_readfirstlane_b32 s2, v2
; MOVREL-NEXT:    v_readfirstlane_b32 s3, v3
; MOVREL-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc_lo
; MOVREL-NEXT:    v_cndmask_b32_e32 v5, v5, v11, vcc_lo
; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v12
; MOVREL-NEXT:    v_readfirstlane_b32 s4, v4
; MOVREL-NEXT:    v_readfirstlane_b32 s5, v5
; MOVREL-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc_lo
; MOVREL-NEXT:    v_cndmask_b32_e32 v7, v7, v11, vcc_lo
; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v12
; MOVREL-NEXT:    v_readfirstlane_b32 s6, v6
; MOVREL-NEXT:    v_readfirstlane_b32 s7, v7
; MOVREL-NEXT:    v_cndmask_b32_e32 v8, v8, v10, vcc_lo
; MOVREL-NEXT:    v_cndmask_b32_e32 v9, v9, v11, vcc_lo
; MOVREL-NEXT:    v_readfirstlane_b32 s8, v8
; MOVREL-NEXT:    v_readfirstlane_b32 s9, v9
; MOVREL-NEXT:    ; return to shader part epilog
entry:
  %insert = insertelement <5 x double> %vec, double %val, i32 %idx
  ret <5 x double> %insert
}